1 | /* |
2 | * Copyright (C) 2011-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #pragma once |
27 | |
28 | #if ENABLE(DFG_JIT) |
29 | |
30 | #include "BlockDirectory.h" |
31 | #include "DFGAbstractInterpreter.h" |
32 | #include "DFGGenerationInfo.h" |
33 | #include "DFGInPlaceAbstractState.h" |
34 | #include "DFGJITCompiler.h" |
35 | #include "DFGOSRExit.h" |
36 | #include "DFGOSRExitJumpPlaceholder.h" |
37 | #include "DFGRegisterBank.h" |
38 | #include "DFGSilentRegisterSavePlan.h" |
39 | #include "JITMathIC.h" |
40 | #include "JITOperations.h" |
41 | #include "PutKind.h" |
42 | #include "SpillRegistersMode.h" |
43 | #include "StructureStubInfo.h" |
44 | #include "ValueRecovery.h" |
45 | #include "VirtualRegister.h" |
46 | |
47 | namespace JSC { namespace DFG { |
48 | |
49 | class GPRTemporary; |
50 | class JSValueOperand; |
51 | class SlowPathGenerator; |
52 | class SpeculativeJIT; |
53 | class SpeculateInt32Operand; |
54 | class SpeculateStrictInt32Operand; |
55 | class SpeculateDoubleOperand; |
56 | class SpeculateCellOperand; |
57 | class SpeculateBooleanOperand; |
58 | |
// Describes what form an operand's value was generated in; produced by
// SpeculativeJIT::checkGeneratedTypeForToInt32() when deciding how to convert to int32.
enum GeneratedOperandType { GeneratedOperandTypeUnknown, GeneratedOperandInteger, GeneratedOperandJSValue};
60 | |
61 | // === SpeculativeJIT === |
62 | // |
63 | // The SpeculativeJIT is used to generate a fast, but potentially |
64 | // incomplete code path for the dataflow. When code generating |
65 | // we may make assumptions about operand types, dynamically check, |
66 | // and bail-out to an alternate code path if these checks fail. |
67 | // Importantly, the speculative code path cannot be reentered once |
68 | // a speculative check has failed. This allows the SpeculativeJIT |
69 | // to propagate type information (including information that has |
70 | // only speculatively been asserted) through the dataflow. |
71 | class SpeculativeJIT { |
72 | WTF_MAKE_FAST_ALLOCATED; |
73 | |
74 | friend struct OSRExit; |
75 | private: |
76 | typedef JITCompiler::TrustedImm32 TrustedImm32; |
77 | typedef JITCompiler::Imm32 Imm32; |
78 | typedef JITCompiler::ImmPtr ImmPtr; |
79 | typedef JITCompiler::TrustedImm64 TrustedImm64; |
80 | typedef JITCompiler::Imm64 Imm64; |
81 | |
82 | // These constants are used to set priorities for spill order for |
83 | // the register allocator. |
#if USE(JSVALUE64)
    // Relative spill priority: lower values are cheaper to re-materialize
    // (NOTE(review): the exact policy lives in the register bank implementation
    // — confirm there how these priorities are consumed).
    enum SpillOrder {
        SpillOrderConstant = 1, // no spill, and cheap fill
        SpillOrderSpilled = 2, // no spill
        SpillOrderJS = 4, // needs spill
        SpillOrderCell = 4, // needs spill
        SpillOrderStorage = 4, // needs spill
        SpillOrderInteger = 5, // needs spill and box
        SpillOrderBoolean = 5, // needs spill and box
        SpillOrderDouble = 6, // needs spill and convert
    };
#elif USE(JSVALUE32_64)
    // 32-bit variant: doubles are stored unboxed, so they are cheaper to spill
    // than on 64-bit, while cells/booleans need boxing.
    enum SpillOrder {
        SpillOrderConstant = 1, // no spill, and cheap fill
        SpillOrderSpilled = 2, // no spill
        SpillOrderJS = 4, // needs spill
        SpillOrderStorage = 4, // needs spill
        SpillOrderDouble = 4, // needs spill
        SpillOrderInteger = 5, // needs spill and box
        SpillOrderCell = 5, // needs spill and box
        SpillOrderBoolean = 5, // needs spill and box
    };
#endif
107 | |
    // Whether a *Result() helper should release the node's children itself
    // (CallUseChildren), or the caller has already called useChildren() explicitly.
    enum UseChildrenMode { CallUseChildren, UseChildrenCalledExplicitly };
109 | |
110 | public: |
111 | SpeculativeJIT(JITCompiler&); |
112 | ~SpeculativeJIT(); |
113 | |
114 | VM& vm() |
115 | { |
116 | return *m_jit.vm(); |
117 | } |
118 | |
    // A thin wrapper around MacroAssembler::TrustedImmPtr that statically forbids
    // embedding raw GC pointers as immediates: GC cells must go through
    // weakPointer() so the plan's weak-reference set knows about them.
    struct TrustedImmPtr {
        // Generic pointer constructor; rejects JSCell-derived pointers at compile time.
        template <typename T>
        explicit TrustedImmPtr(T* value)
            : m_value(value)
        {
            static_assert(!std::is_base_of<JSCell, T>::value, "To use a GC pointer, the graph must be aware of it. Use SpeculativeJIT::TrustedImmPtr::weakPointer instead." );
        }

        // RegisteredStructure is already known to the graph, so the raw Structure*
        // is safe to embed.
        explicit TrustedImmPtr(RegisteredStructure structure)
            : m_value(structure.get())
        { }

        explicit TrustedImmPtr(std::nullptr_t)
            : m_value(nullptr)
        { }

        // FrozenValue cells are kept alive by the graph; must actually hold a cell.
        explicit TrustedImmPtr(FrozenValue* value)
            : m_value(value->cell())
        {
            RELEASE_ASSERT(value->value().isCell());
        }

        explicit TrustedImmPtr(size_t value)
            : m_value(bitwise_cast<void*>(value))
        {
        }

        // The only sanctioned way to embed a GC cell: registers it as a weak
        // reference on the compilation plan first.
        static TrustedImmPtr weakPointer(Graph& graph, JSCell* cell)
        {
            graph.m_plan.weakReferences().addLazily(cell);
            return TrustedImmPtr(bitwise_cast<size_t>(cell));
        }

        operator MacroAssembler::TrustedImmPtr() const { return m_value; }
        operator MacroAssembler::TrustedImm() const { return m_value; }

        intptr_t asIntptr()
        {
            return m_value.asIntptr();
        }

    private:
        MacroAssembler::TrustedImmPtr m_value;
    };
163 | |
164 | bool compile(); |
165 | |
166 | void createOSREntries(); |
167 | void linkOSREntries(LinkBuffer&); |
168 | |
169 | BasicBlock* nextBlock() |
170 | { |
171 | for (BlockIndex resultIndex = m_block->index + 1; ; resultIndex++) { |
172 | if (resultIndex >= m_jit.graph().numBlocks()) |
173 | return 0; |
174 | if (BasicBlock* result = m_jit.graph().block(resultIndex)) |
175 | return result; |
176 | } |
177 | } |
178 | |
179 | #if USE(JSVALUE64) |
180 | GPRReg fillJSValue(Edge); |
181 | #elif USE(JSVALUE32_64) |
182 | bool fillJSValue(Edge, GPRReg&, GPRReg&, FPRReg&); |
183 | #endif |
184 | GPRReg fillStorage(Edge); |
185 | |
    // lock and unlock GPR & FPR registers in their respective register banks.
    void lock(GPRReg reg)
    {
        m_gprs.lock(reg);
    }
    void lock(FPRReg reg)
    {
        m_fprs.lock(reg);
    }
    void unlock(GPRReg reg)
    {
        m_gprs.unlock(reg);
    }
    void unlock(FPRReg reg)
    {
        m_fprs.unlock(reg);
    }
203 | |
    // Used to check whether a child node is on its last use,
    // and its machine registers may be reused.
    bool canReuse(Node* node)
    {
        return generationInfo(node).useCount() == 1;
    }
    // Variant for when both children are the same node: reusable only if these
    // are its final two uses.
    bool canReuse(Node* nodeA, Node* nodeB)
    {
        return nodeA == nodeB && generationInfo(nodeA).useCount() == 2;
    }
    bool canReuse(Edge nodeUse)
    {
        return canReuse(nodeUse.node());
    }
    // Re-lock a register so it survives as the result of the current node.
    GPRReg reuse(GPRReg reg)
    {
        m_gprs.lock(reg);
        return reg;
    }
    FPRReg reuse(FPRReg reg)
    {
        m_fprs.lock(reg);
        return reg;
    }
228 | |
    // Allocate a gpr/fpr.
    // Allocates a GPR, spilling a victim VirtualRegister to the stack if the
    // bank is full.
    GPRReg allocate()
    {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
#endif
        VirtualRegister spillMe;
        GPRReg gpr = m_gprs.allocate(spillMe);
        if (spillMe.isValid()) {
#if USE(JSVALUE32_64)
            // On 32-bit, a JSValue occupies two GPRs (tag + payload); release the
            // half that was not handed back to us before spilling the value.
            GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
            if ((info.registerFormat() & DataFormatJS))
                m_gprs.release(info.tagGPR() == gpr ? info.payloadGPR() : info.tagGPR());
#endif
            spill(spillMe);
        }
        return gpr;
    }
    // Allocates a specific GPR, spilling whatever currently occupies it if needed.
    GPRReg allocate(GPRReg specific)
    {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
#endif
        VirtualRegister spillMe = m_gprs.allocateSpecific(specific);
        if (spillMe.isValid()) {
#if USE(JSVALUE32_64)
            // As in allocate(): a two-GPR JSValue must have its other half
            // released before the value is spilled.
            GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
            RELEASE_ASSERT(info.registerFormat() != DataFormatJSDouble);
            if ((info.registerFormat() & DataFormatJS))
                m_gprs.release(info.tagGPR() == specific ? info.payloadGPR() : info.tagGPR());
#endif
            spill(spillMe);
        }
        return specific;
    }
    // Allocates a GPR without forcing a spill; see RegisterBank::tryAllocate for
    // the failure convention (presumably InvalidGPRReg when none is free — confirm
    // in DFGRegisterBank.h).
    GPRReg tryAllocate()
    {
        return m_gprs.tryAllocate();
    }
    // Allocates an FPR, spilling a victim VirtualRegister if the bank is full.
    FPRReg fprAllocate()
    {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
#endif
        VirtualRegister spillMe;
        FPRReg fpr = m_fprs.allocate(spillMe);
        if (spillMe.isValid())
            spill(spillMe);
        return fpr;
    }
279 | |
    // Check whether a VirtualRegister is currently in a machine register.
    // We use this when filling operands to fill those that are already in
    // machine registers first (by locking VirtualRegisters that are already
    // in machine registers before filling those that are not we attempt to
    // avoid spilling values we will need immediately).
    // True if the node's value currently lives in some machine register.
    bool isFilled(Node* node)
    {
        return generationInfo(node).registerFormat() != DataFormatNone;
    }
    // True if the node's value currently lives in an FPR as an unboxed double.
    bool isFilledDouble(Node* node)
    {
        return generationInfo(node).registerFormat() == DataFormatDouble;
    }
293 | |
    // Called on an operand once it has been consumed by a parent node.
    // Decrements the operand's use count and, on the last use, releases the
    // machine registers backing it.
    void use(Node* node)
    {
        if (!node->hasResult())
            return;
        GenerationInfo& info = generationInfo(node);

        // use() returns true when the value becomes dead, and any
        // associated resources may be freed.
        if (!info.use(*m_stream))
            return;

        // Release the associated machine registers.
        DataFormat registerFormat = info.registerFormat();
#if USE(JSVALUE64)
        if (registerFormat == DataFormatDouble)
            m_fprs.release(info.fpr());
        else if (registerFormat != DataFormatNone)
            m_gprs.release(info.gpr());
#elif USE(JSVALUE32_64)
        if (registerFormat == DataFormatDouble)
            m_fprs.release(info.fpr());
        else if (registerFormat & DataFormatJS) {
            // 32-bit JSValues occupy two GPRs (tag + payload); release both.
            m_gprs.release(info.tagGPR());
            m_gprs.release(info.payloadGPR());
        } else if (registerFormat != DataFormatNone)
            m_gprs.release(info.gpr());
#endif
    }
    void use(Edge nodeUse)
    {
        use(nodeUse.node());
    }
327 | |
328 | RegisterSet usedRegisters(); |
329 | |
    // Queries the graph for whether the MasqueradesAsUndefined watchpoint still
    // holds at the given code origin (lets us omit masquerades checks).
    bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
    {
        return m_jit.graph().masqueradesAsUndefinedWatchpointIsStillValid(codeOrigin);
    }
    // Convenience overload for the node currently being compiled.
    bool masqueradesAsUndefinedWatchpointIsStillValid()
    {
        return masqueradesAsUndefinedWatchpointIsStillValid(m_currentNode->origin.semantic);
    }
338 | |
339 | void compileStoreBarrier(Node*); |
340 | |
341 | // Called by the speculative operand types, below, to fill operand to |
342 | // machine registers, implicitly generating speculation checks as needed. |
343 | GPRReg fillSpeculateInt32(Edge, DataFormat& returnFormat); |
344 | GPRReg fillSpeculateInt32Strict(Edge); |
345 | GPRReg fillSpeculateInt52(Edge, DataFormat desiredFormat); |
346 | FPRReg fillSpeculateDouble(Edge); |
347 | GPRReg fillSpeculateCell(Edge); |
348 | GPRReg fillSpeculateBoolean(Edge); |
349 | GeneratedOperandType checkGeneratedTypeForToInt32(Node*); |
350 | |
351 | void addSlowPathGenerator(std::unique_ptr<SlowPathGenerator>); |
352 | void addSlowPathGeneratorLambda(Function<void()>&&); |
353 | void runSlowPathGenerators(PCToCodeOriginMapBuilder&); |
354 | |
355 | void compile(Node*); |
356 | void noticeOSRBirth(Node*); |
357 | void bail(AbortReason); |
358 | void compileCurrentBlock(); |
359 | |
360 | void checkArgumentTypes(); |
361 | |
362 | void clearGenerationInfo(); |
363 | |
364 | // These methods are used when generating 'unexpected' |
365 | // calls out from JIT code to C++ helper routines - |
366 | // they spill all live values to the appropriate |
367 | // slots in the JSStack without changing any state |
368 | // in the GenerationInfo. |
369 | SilentRegisterSavePlan silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source); |
370 | SilentRegisterSavePlan silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source); |
371 | void silentSpill(const SilentRegisterSavePlan&); |
372 | void silentFill(const SilentRegisterSavePlan&); |
373 | |
374 | template<typename CollectionType> |
375 | void silentSpill(const CollectionType& savePlans) |
376 | { |
377 | for (unsigned i = 0; i < savePlans.size(); ++i) |
378 | silentSpill(savePlans[i]); |
379 | } |
380 | |
381 | template<typename CollectionType> |
382 | void silentFill(const CollectionType& savePlans) |
383 | { |
384 | for (unsigned i = savePlans.size(); i--;) |
385 | silentFill(savePlans[i]); |
386 | } |
387 | |
    // Builds a save plan for every live GPR/FPR except the given excludes,
    // appending the plans to `plans`; when doSpill is set, also performs the
    // spills immediately.
    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, GPRReg exclude, GPRReg exclude2 = InvalidGPRReg, FPRReg fprExclude = InvalidFPRReg)
    {
        ASSERT(plans.isEmpty());
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            GPRReg gpr = iter.regID();
            if (iter.name().isValid() && gpr != exclude && gpr != exclude2) {
                SilentRegisterSavePlan plan = silentSavePlanForGPR(iter.name(), gpr);
                if (doSpill)
                    silentSpill(plan);
                plans.append(plan);
            }
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name().isValid() && iter.regID() != fprExclude) {
                SilentRegisterSavePlan plan = silentSavePlanForFPR(iter.name(), iter.regID());
                if (doSpill)
                    silentSpill(plan);
                plans.append(plan);
            }
        }
    }
    // NoResultTag overload: nothing to exclude — spill everything.
    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, NoResultTag)
    {
        silentSpillAllRegistersImpl(doSpill, plans, InvalidGPRReg, InvalidGPRReg, InvalidFPRReg);
    }
    // FPR-result overload: exclude only the result FPR.
    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, FPRReg exclude)
    {
        silentSpillAllRegistersImpl(doSpill, plans, InvalidGPRReg, InvalidGPRReg, exclude);
    }
    // JSValueRegs overload: excludes one GPR on 64-bit, the tag/payload pair on 32-bit.
    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, JSValueRegs exclude)
    {
#if USE(JSVALUE32_64)
        silentSpillAllRegistersImpl(doSpill, plans, exclude.tagGPR(), exclude.payloadGPR());
#else
        silentSpillAllRegistersImpl(doSpill, plans, exclude.gpr());
#endif
    }
429 | |
    // Spills all live registers (recording the plans in m_plans so that
    // silentFillAllRegisters() can undo it), except the given excludes.
    void silentSpillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg, FPRReg fprExclude = InvalidFPRReg)
    {
        silentSpillAllRegistersImpl(true, m_plans, exclude, exclude2, fprExclude);
    }
    void silentSpillAllRegisters(FPRReg exclude)
    {
        silentSpillAllRegisters(InvalidGPRReg, InvalidGPRReg, exclude);
    }
    void silentSpillAllRegisters(JSValueRegs exclude)
    {
#if USE(JSVALUE64)
        silentSpillAllRegisters(exclude.payloadGPR());
#else
        silentSpillAllRegisters(exclude.payloadGPR(), exclude.tagGPR());
#endif
    }
446 | |
447 | void silentFillAllRegisters() |
448 | { |
449 | while (!m_plans.isEmpty()) { |
450 | SilentRegisterSavePlan& plan = m_plans.last(); |
451 | silentFill(plan); |
452 | m_plans.removeLast(); |
453 | } |
454 | } |
455 | |
    // These methods convert between raw doubles and doubles boxed as JSValues.
#if USE(JSVALUE64)
    GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
    {
        return m_jit.boxDouble(fpr, gpr);
    }
    FPRReg unboxDouble(GPRReg gpr, GPRReg resultGPR, FPRReg fpr)
    {
        return m_jit.unboxDouble(gpr, resultGPR, fpr);
    }
    // Boxes into a freshly allocated GPR (may spill to obtain one).
    GPRReg boxDouble(FPRReg fpr)
    {
        return boxDouble(fpr, allocate());
    }

    void boxInt52(GPRReg sourceGPR, GPRReg targetGPR, DataFormat);
#elif USE(JSVALUE32_64)
    // On 32-bit a boxed double is a tag/payload GPR pair.
    void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
    {
        m_jit.boxDouble(fpr, tagGPR, payloadGPR);
    }
    void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
    {
        m_jit.unboxDouble(tagGPR, payloadGPR, fpr, scratchFPR);
    }
#endif
    void boxDouble(FPRReg fpr, JSValueRegs regs)
    {
        m_jit.boxDouble(fpr, regs);
    }
486 | |
    // Spill a VirtualRegister to the JSStack.
    // Writes the value's current machine-register contents to its stack slot,
    // boxing as required by the representation, and records the spilled format
    // in the GenerationInfo.
    void spill(VirtualRegister spillMe)
    {
        GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);

#if USE(JSVALUE32_64)
        if (info.registerFormat() == DataFormatNone) // it has been spilled. JS values which have two GPRs can reach here
            return;
#endif
        // Check the GenerationInfo to see if this value needs writing
        // to the JSStack - if not, mark it as spilled & return.
        if (!info.needsSpill()) {
            info.setSpilled(*m_stream, spillMe);
            return;
        }

        DataFormat spillFormat = info.registerFormat();
        switch (spillFormat) {
        case DataFormatStorage: {
            // This is special, since it's not a JS value - as in it's not visible to JS
            // code.
            m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, DataFormatStorage);
            return;
        }

        case DataFormatInt32: {
            // Raw int32: store only the payload; no boxing needed while spilled
            // in this format.
            m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
            info.spill(*m_stream, spillMe, DataFormatInt32);
            return;
        }

#if USE(JSVALUE64)
        case DataFormatDouble: {
            m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, DataFormatDouble);
            return;
        }

        case DataFormatInt52:
        case DataFormatStrictInt52: {
            m_jit.store64(info.gpr(), JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, spillFormat);
            return;
        }

        default:
            // The following code handles JSValues, int32s, and cells.
            RELEASE_ASSERT(spillFormat == DataFormatCell || spillFormat & DataFormatJS);

            GPRReg reg = info.gpr();
            // We need to box int32 and cell values ...
            // but on JSVALUE64 boxing a cell is a no-op!
            // NOTE(review): DataFormatInt32 was already handled by its own case
            // above (and is excluded by the RELEASE_ASSERT), so this branch looks
            // unreachable; kept as-is.
            if (spillFormat == DataFormatInt32)
                m_jit.or64(GPRInfo::tagTypeNumberRegister, reg);

            // Spill the value, and record it as spilled in its boxed form.
            m_jit.store64(reg, JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, (DataFormat)(spillFormat | DataFormatJS));
            return;
#elif USE(JSVALUE32_64)
        case DataFormatCell:
        case DataFormatBoolean: {
            m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
            info.spill(*m_stream, spillMe, spillFormat);
            return;
        }

        case DataFormatDouble: {
            // On JSVALUE32_64 boxing a double is a no-op.
            m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, DataFormatDouble);
            return;
        }

        default:
            // The following code handles JSValues.
            RELEASE_ASSERT(spillFormat & DataFormatJS);
            m_jit.store32(info.tagGPR(), JITCompiler::tagFor(spillMe));
            m_jit.store32(info.payloadGPR(), JITCompiler::payloadFor(spillMe));
            info.spill(*m_stream, spillMe, spillFormat);
            return;
#endif
        }
    }
572 | |
    // Type queries backed by the abstract interpreter's proven facts (m_state):
    // "known" means the proof holds without emitting a runtime check.
    bool isKnownInteger(Node* node) { return m_state.forNode(node).isType(SpecInt32Only); }
    bool isKnownCell(Node* node) { return m_state.forNode(node).isType(SpecCell); }

    // "known not" means the speculated type has no overlap with the given spec.
    bool isKnownNotInteger(Node* node) { return !(m_state.forNode(node).m_type & SpecInt32Only); }
    bool isKnownNotNumber(Node* node) { return !(m_state.forNode(node).m_type & SpecFullNumber); }
    bool isKnownNotCell(Node* node) { return !(m_state.forNode(node).m_type & SpecCell); }
    bool isKnownNotOther(Node* node) { return !(m_state.forNode(node).m_type & SpecOther); }
580 | |
581 | bool canBeRope(Edge&); |
582 | |
583 | UniquedStringImpl* identifierUID(unsigned index) |
584 | { |
585 | return m_jit.graph().identifiers()[index]; |
586 | } |
587 | |
    // Spill all VirtualRegisters back to the JSStack.
    // Spills and releases every live GPR and FPR, leaving both banks empty
    // (required before calling out to C helpers).
    void flushRegisters()
    {
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            if (iter.name().isValid()) {
                spill(iter.name());
                iter.release();
            }
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name().isValid()) {
                spill(iter.name());
                iter.release();
            }
        }
    }
604 | |
605 | // Used to ASSERT flushRegisters() has been called prior to |
606 | // calling out from JIT code to a C helper function. |
607 | bool isFlushed() |
608 | { |
609 | for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) { |
610 | if (iter.name().isValid()) |
611 | return false; |
612 | } |
613 | for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) { |
614 | if (iter.name().isValid()) |
615 | return false; |
616 | } |
617 | return true; |
618 | } |
619 | |
#if USE(JSVALUE64)
    // Encodes a constant node's JSValue as a 64-bit immediate.
    static MacroAssembler::Imm64 valueOfJSConstantAsImm64(Node* node)
    {
        return MacroAssembler::Imm64(JSValue::encode(node->asJSValue()));
    }
#endif
626 | |
    // Helper functions to enable code sharing in implementations of bit/shift ops.

    // Dispatches a bitwise op (and/or/xor) with an immediate operand.
    void bitOp(NodeType op, int32_t imm, GPRReg op1, GPRReg result)
    {
        switch (op) {
        case ArithBitAnd:
            m_jit.and32(Imm32(imm), op1, result);
            break;
        case ArithBitOr:
            m_jit.or32(Imm32(imm), op1, result);
            break;
        case ArithBitXor:
            m_jit.xor32(Imm32(imm), op1, result);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
    // Dispatches a bitwise op (and/or/xor) with a register operand.
    void bitOp(NodeType op, GPRReg op1, GPRReg op2, GPRReg result)
    {
        switch (op) {
        case ArithBitAnd:
            m_jit.and32(op1, op2, result);
            break;
        case ArithBitOr:
            m_jit.or32(op1, op2, result);
            break;
        case ArithBitXor:
            m_jit.xor32(op1, op2, result);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
    // Dispatches a 32-bit shift with a constant shift amount.
    void shiftOp(NodeType op, GPRReg op1, int32_t shiftAmount, GPRReg result)
    {
        switch (op) {
        case BitRShift:
            m_jit.rshift32(op1, Imm32(shiftAmount), result);
            break;
        case BitLShift:
            m_jit.lshift32(op1, Imm32(shiftAmount), result);
            break;
        case BitURShift:
            m_jit.urshift32(op1, Imm32(shiftAmount), result);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
    // Dispatches a 32-bit shift with a register shift amount.
    void shiftOp(NodeType op, GPRReg op1, GPRReg shiftAmount, GPRReg result)
    {
        switch (op) {
        case BitRShift:
            m_jit.rshift32(op1, shiftAmount, result);
            break;
        case BitLShift:
            m_jit.lshift32(op1, shiftAmount, result);
            break;
        case BitURShift:
            m_jit.urshift32(op1, shiftAmount, result);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
692 | |
    // Returns the index of the branch node if peephole is okay, UINT_MAX otherwise.
    // "Okay" means the block's terminal is a Branch on the current node and the
    // only nodes between here and the terminal are skippable (non-generating, or
    // childless Phantoms) — so compare-and-branch can be fused.
    unsigned detectPeepHoleBranch()
    {
        // Check that no intervening nodes will be generated.
        // Note: the loop bound is size() - 1, deliberately excluding the terminal.
        for (unsigned index = m_indexInBlock + 1; index < m_block->size() - 1; ++index) {
            Node* node = m_block->at(index);
            if (!node->shouldGenerate())
                continue;
            // Check if it's a Phantom that can be safely ignored.
            if (node->op() == Phantom && !node->child1())
                continue;
            return UINT_MAX;
        }

        // Check if the lastNode is a branch on this node.
        Node* lastNode = m_block->terminal();
        return lastNode->op() == Branch && lastNode->child1() == m_currentNode ? m_block->size() - 1 : UINT_MAX;
    }
711 | |
712 | void compileCheckTraps(Node*); |
713 | |
714 | void compileMovHint(Node*); |
715 | void compileMovHintAndCheck(Node*); |
716 | |
717 | void cachedGetById(CodeOrigin, JSValueRegs base, JSValueRegs result, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode, AccessType); |
718 | void cachedPutById(CodeOrigin, GPRReg baseGPR, JSValueRegs valueRegs, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill); |
719 | |
720 | #if USE(JSVALUE64) |
721 | void cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode, AccessType); |
722 | void cachedGetByIdWithThis(CodeOrigin, GPRReg baseGPR, GPRReg thisGPR, GPRReg resultGPR, unsigned identifierNumber, const JITCompiler::JumpList& slowPathTarget = JITCompiler::JumpList()); |
723 | #elif USE(JSVALUE32_64) |
724 | void cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode, AccessType); |
725 | void cachedGetByIdWithThis(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg thisTagGPROrNone, GPRReg thisPayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, const JITCompiler::JumpList& slowPathTarget = JITCompiler::JumpList()); |
726 | #endif |
727 | |
728 | void compileDeleteById(Node*); |
729 | void compileDeleteByVal(Node*); |
730 | void compilePushWithScope(Node*); |
731 | void compileGetById(Node*, AccessType); |
732 | void compileGetByIdFlush(Node*, AccessType); |
733 | void compileInById(Node*); |
734 | void compileInByVal(Node*); |
735 | |
736 | void nonSpeculativeNonPeepholeCompareNullOrUndefined(Edge operand); |
737 | void nonSpeculativePeepholeBranchNullOrUndefined(Edge operand, Node* branchNode); |
738 | |
739 | void nonSpeculativePeepholeBranch(Node*, Node* branchNode, MacroAssembler::RelationalCondition, S_JITOperation_EJJ helperFunction); |
740 | void nonSpeculativeNonPeepholeCompare(Node*, MacroAssembler::RelationalCondition, S_JITOperation_EJJ helperFunction); |
741 | |
742 | void nonSpeculativePeepholeStrictEq(Node*, Node* branchNode, bool invert = false); |
743 | void nonSpeculativeNonPeepholeStrictEq(Node*, bool invert = false); |
744 | bool nonSpeculativeStrictEq(Node*, bool invert = false); |
745 | |
746 | void compileInstanceOfForCells(Node*, JSValueRegs valueGPR, JSValueRegs prototypeGPR, GPRReg resultGPT, GPRReg scratchGPR, GPRReg scratch2GPR, JITCompiler::Jump slowCase = JITCompiler::Jump()); |
747 | void compileInstanceOf(Node*); |
748 | void compileInstanceOfCustom(Node*); |
749 | void compileOverridesHasInstance(Node*); |
750 | |
751 | void compileIsCellWithType(Node*); |
752 | void compileIsTypedArrayView(Node*); |
753 | |
754 | void emitCall(Node*); |
755 | |
756 | void emitAllocateButterfly(GPRReg storageGPR, GPRReg sizeGPR, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, MacroAssembler::JumpList& slowCases); |
757 | void emitInitializeButterfly(GPRReg storageGPR, GPRReg sizeGPR, JSValueRegs emptyValueRegs, GPRReg scratchGPR); |
758 | void compileAllocateNewArrayWithSize(JSGlobalObject*, GPRReg resultGPR, GPRReg sizeGPR, IndexingType, bool shouldConvertLargeSizeToArrayStorage = true); |
759 | |
    // Called once a node has completed code generation but prior to setting
    // its result, to free up its children. (This must happen prior to setting
    // the node's result, since the node may have the same VirtualRegister as
    // a child, and as such will use the same GenerationInfo).
764 | void useChildren(Node*); |
765 | |
    // These methods are called to initialize the GenerationInfo
    // to describe the result of an operation.
    // Records an int32 result: retains the GPR for the node's VirtualRegister and
    // initializes its GenerationInfo as raw int32 or (64-bit only) boxed JSInt32.
    void int32Result(GPRReg reg, Node* node, DataFormat format = DataFormatInt32, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

        if (format == DataFormatInt32) {
            m_jit.jitAssertIsInt32(reg);
            m_gprs.retain(reg, virtualRegister, SpillOrderInteger);
            info.initInt32(node, node->refCount(), reg);
        } else {
#if USE(JSVALUE64)
            RELEASE_ASSERT(format == DataFormatJSInt32);
            m_jit.jitAssertIsJSInt32(reg);
            m_gprs.retain(reg, virtualRegister, SpillOrderJS);
            info.initJSValue(node, node->refCount(), reg, format);
#elif USE(JSVALUE32_64)
            // 32-bit has no single-GPR boxed int32 representation.
            RELEASE_ASSERT_NOT_REACHED();
#endif
        }
    }
    // Convenience overload: int32 result in the default DataFormatInt32.
    void int32Result(GPRReg reg, Node* node, UseChildrenMode mode)
    {
        int32Result(reg, node, DataFormatInt32, mode);
    }
    // Records an Int52 result (shifted or strict form, per `format`).
    void int52Result(GPRReg reg, Node* node, DataFormat format, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

        m_gprs.retain(reg, virtualRegister, SpillOrderJS);
        info.initInt52(node, node->refCount(), reg, format);
    }
    void int52Result(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        int52Result(reg, node, DataFormatInt52, mode);
    }
    void strictInt52Result(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        int52Result(reg, node, DataFormatStrictInt52, mode);
    }
814 | void noResult(Node* node, UseChildrenMode mode = CallUseChildren) |
815 | { |
816 | if (mode == UseChildrenCalledExplicitly) |
817 | return; |
818 | useChildren(node); |
819 | } |
    // Records a cell (GC pointer) result in the given GPR.
    void cellResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderCell);
        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
        info.initCell(node, node->refCount(), reg);
    }
    // Record a boolean result that is already in canonical form: on 64-bit a
    // tagged JSValue boolean, on 32-bit a raw boolean payload.
    void blessedBooleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
#if USE(JSVALUE64)
        jsValueResult(reg, node, DataFormatJSBoolean, mode);
#else
        booleanResult(reg, node, mode);
#endif
    }
    // Record a raw 0/1 boolean result; on 64-bit it is first "blessed" into a
    // tagged JSValue boolean.
    void unblessedBooleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
#if USE(JSVALUE64)
        blessBoolean(reg);
#endif
        blessedBooleanResult(reg, node, mode);
    }
845 | #if USE(JSVALUE64) |
846 | void jsValueResult(GPRReg reg, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren) |
847 | { |
848 | if (format == DataFormatJSInt32) |
849 | m_jit.jitAssertIsJSInt32(reg); |
850 | |
851 | if (mode == CallUseChildren) |
852 | useChildren(node); |
853 | |
854 | VirtualRegister virtualRegister = node->virtualRegister(); |
855 | m_gprs.retain(reg, virtualRegister, SpillOrderJS); |
856 | GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); |
857 | info.initJSValue(node, node->refCount(), reg, format); |
858 | } |
    // Convenience overload: record a JSValue result with the default DataFormatJS.
    void jsValueResult(GPRReg reg, Node* node, UseChildrenMode mode)
    {
        jsValueResult(reg, node, DataFormatJS, mode);
    }
863 | #elif USE(JSVALUE32_64) |
864 | void booleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren) |
865 | { |
866 | if (mode == CallUseChildren) |
867 | useChildren(node); |
868 | |
869 | VirtualRegister virtualRegister = node->virtualRegister(); |
870 | m_gprs.retain(reg, virtualRegister, SpillOrderBoolean); |
871 | GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); |
872 | info.initBoolean(node, node->refCount(), reg); |
873 | } |
874 | void jsValueResult(GPRReg tag, GPRReg payload, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren) |
875 | { |
876 | if (mode == CallUseChildren) |
877 | useChildren(node); |
878 | |
879 | VirtualRegister virtualRegister = node->virtualRegister(); |
880 | m_gprs.retain(tag, virtualRegister, SpillOrderJS); |
881 | m_gprs.retain(payload, virtualRegister, SpillOrderJS); |
882 | GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); |
883 | info.initJSValue(node, node->refCount(), tag, payload, format); |
884 | } |
    // Convenience overload: record a tag/payload JSValue result with DataFormatJS.
    void jsValueResult(GPRReg tag, GPRReg payload, Node* node, UseChildrenMode mode)
    {
        jsValueResult(tag, payload, node, DataFormatJS, mode);
    }
889 | #endif |
    // Record a JSValue result held in |regs|, dispatching to the appropriate
    // per-value-representation overload.
    void jsValueResult(JSValueRegs regs, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
    {
#if USE(JSVALUE64)
        jsValueResult(regs.gpr(), node, format, mode);
#else
        jsValueResult(regs.tagGPR(), regs.payloadGPR(), node, format, mode);
#endif
    }
898 | void storageResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren) |
899 | { |
900 | if (mode == CallUseChildren) |
901 | useChildren(node); |
902 | |
903 | VirtualRegister virtualRegister = node->virtualRegister(); |
904 | m_gprs.retain(reg, virtualRegister, SpillOrderStorage); |
905 | GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); |
906 | info.initStorage(node, node->refCount(), reg); |
907 | } |
908 | void doubleResult(FPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren) |
909 | { |
910 | if (mode == CallUseChildren) |
911 | useChildren(node); |
912 | |
913 | VirtualRegister virtualRegister = node->virtualRegister(); |
914 | m_fprs.retain(reg, virtualRegister, SpillOrderDouble); |
915 | GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); |
916 | info.initDouble(node, node->refCount(), reg); |
917 | } |
    // Mark a constant node's generation info as "constant" — no register or spill
    // slot is needed since the value can always be rematerialized.
    void initConstantInfo(Node* node)
    {
        ASSERT(node->hasConstant());
        generationInfo(node).initConstant(node, node->refCount());
    }
923 | |
#define FIRST_ARGUMENT_TYPE typename FunctionTraits<OperationType>::template ArgumentType<0>

    // Call a C++ operation that returns a value; after the call the result is
    // moved into |result|. Argument marshalling follows the platform calling
    // convention via setupArguments<OperationType>.
    template<typename OperationType, typename ResultRegType, typename... Args>
    std::enable_if_t<
        FunctionTraits<OperationType>::hasResult,
        JITCompiler::Call>
    callOperation(OperationType operation, ResultRegType result, Args... args)
    {
        m_jit.setupArguments<OperationType>(args...);
        return appendCallSetResult(operation, result);
    }
935 | |
    // Call a void operation. The !is_same<Arg, NoResultTag> constraint keeps this
    // overload from being selected when the caller explicitly passes NoResultTag.
    template<typename OperationType, typename Arg, typename... Args>
    std::enable_if_t<
        !FunctionTraits<OperationType>::hasResult
        && !std::is_same<Arg, NoResultTag>::value,
        JITCompiler::Call>
    callOperation(OperationType operation, Arg arg, Args... args)
    {
        m_jit.setupArguments<OperationType>(arg, args...);
        return appendCall(operation);
    }
946 | |
    // Call a void operation with an explicit NoResultTag marker; the tag is
    // dropped before argument marshalling.
    template<typename OperationType, typename... Args>
    std::enable_if_t<
        !FunctionTraits<OperationType>::hasResult,
        JITCompiler::Call>
    callOperation(OperationType operation, NoResultTag, Args... args)
    {
        m_jit.setupArguments<OperationType>(args...);
        return appendCall(operation);
    }
956 | |
    // Call a void operation that takes no arguments.
    template<typename OperationType>
    std::enable_if_t<
        !FunctionTraits<OperationType>::hasResult,
        JITCompiler::Call>
    callOperation(OperationType operation)
    {
        m_jit.setupArguments<OperationType>();
        return appendCall(operation);
    }

#undef FIRST_ARGUMENT_TYPE
968 | |
    // Call an operation and, if it throws, roll the call frame back before
    // propagating the exception (see appendCallWithCallFrameRollbackOnException).
    JITCompiler::Call callOperationWithCallFrameRollbackOnException(V_JITOperation_ECb operation, void* pointer)
    {
        m_jit.setupArguments<V_JITOperation_ECb>(TrustedImmPtr(pointer));
        return appendCallWithCallFrameRollbackOnException(operation);
    }
974 | |
    // Same as above, for an operation returning an int32 placed in |result|.
    JITCompiler::Call callOperationWithCallFrameRollbackOnException(Z_JITOperation_E operation, GPRReg result)
    {
        m_jit.setupArguments<Z_JITOperation_E>();
        return appendCallWithCallFrameRollbackOnExceptionSetResult(operation, result);
    }
980 | |
981 | #if !defined(NDEBUG) && !CPU(ARM_THUMB2) && !CPU(MIPS) |
    void prepareForExternalCall()
    {
        // We're about to call out to a "native" helper function. The helper
        // function is expected to set topCallFrame itself with the ExecState
        // that is passed to it.
        //
        // We explicitly trash topCallFrame here so that we'll know if some of
        // the helper functions are not setting topCallFrame when they should
        // be doing so. Note: the previous value in topCallFrame was not valid
        // anyway since it was not being updated by JIT'ed code by design.

        // Poison one 32-bit word at a time so this works for both 4- and
        // 8-byte pointers.
        for (unsigned i = 0; i < sizeof(void*) / 4; i++)
            m_jit.store32(TrustedImm32(0xbadbeef), reinterpret_cast<char*>(&m_jit.vm()->topCallFrame) + i * 4);
    }
996 | #else |
997 | void prepareForExternalCall() { } |
998 | #endif |
999 | |
1000 | // These methods add call instructions, optionally setting results, and optionally rolling back the call frame on an exception. |
    // Emit a call to a C function, first recording the current code origin so
    // that OSR exit / exception machinery can attribute the call site.
    JITCompiler::Call appendCall(const FunctionPtr<CFunctionPtrTag> function)
    {
        prepareForExternalCall();
        m_jit.emitStoreCodeOrigin(m_currentNode->origin.semantic);
        return m_jit.appendCall(function);
    }
1007 | |
    // Like appendCall, but follows the call with an exception check that rolls
    // the call frame back before unwinding.
    JITCompiler::Call appendCallWithCallFrameRollbackOnException(const FunctionPtr<CFunctionPtrTag> function)
    {
        JITCompiler::Call call = appendCall(function);
        m_jit.exceptionCheckWithCallFrameRollback();
        return call;
    }
1014 | |
    // Rollback-checked call whose integer result is moved into |result| (skipped
    // when the caller wants it left in the return-value register, or no result).
    JITCompiler::Call appendCallWithCallFrameRollbackOnExceptionSetResult(const FunctionPtr<CFunctionPtrTag> function, GPRReg result)
    {
        JITCompiler::Call call = appendCallWithCallFrameRollbackOnException(function);
        if ((result != InvalidGPRReg) && (result != GPRInfo::returnValueGPR))
            m_jit.move(GPRInfo::returnValueGPR, result);
        return call;
    }
1022 | |
1023 | JITCompiler::Call appendCallSetResult(const FunctionPtr<CFunctionPtrTag> function, GPRReg result) |
1024 | { |
1025 | JITCompiler::Call call = appendCall(function); |
1026 | if (result != InvalidGPRReg) |
1027 | m_jit.move(GPRInfo::returnValueGPR, result); |
1028 | return call; |
1029 | } |
1030 | |
    // Emit a call returning a two-register result; setupResults moves the two
    // return registers into |result1|/|result2|.
    JITCompiler::Call appendCallSetResult(const FunctionPtr<CFunctionPtrTag> function, GPRReg result1, GPRReg result2)
    {
        JITCompiler::Call call = appendCall(function);
        m_jit.setupResults(result1, result2);
        return call;
    }
1037 | |
    // Emit a call whose JSValue result lands in |resultRegs|, dispatching on the
    // value representation (single GPR vs. payload/tag pair).
    JITCompiler::Call appendCallSetResult(const FunctionPtr<CFunctionPtrTag> function, JSValueRegs resultRegs)
    {
#if USE(JSVALUE64)
        return appendCallSetResult(function, resultRegs.gpr());
#else
        return appendCallSetResult(function, resultRegs.payloadGPR(), resultRegs.tagGPR());
#endif
    }
1046 | |
1047 | #if CPU(X86) |
    // x86-32: doubles come back on the x87 FP stack. Pop st(0) through the
    // memory at the stack pointer, then load it into the requested FPR.
    JITCompiler::Call appendCallSetResult(const FunctionPtr<CFunctionPtrTag> function, FPRReg result)
    {
        JITCompiler::Call call = appendCall(function);
        if (result != InvalidFPRReg) {
            m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister);
            m_jit.loadDouble(JITCompiler::stackPointerRegister, result);
        }
        return call;
    }
1057 | #elif CPU(ARM_THUMB2) && !CPU(ARM_HARDFP) |
    // ARM soft-float ABI: the double comes back in the r0/r1 GPR pair; move it
    // into the requested FPR with vmov.
    JITCompiler::Call appendCallSetResult(const FunctionPtr<CFunctionPtrTag> function, FPRReg result)
    {
        JITCompiler::Call call = appendCall(function);
        if (result != InvalidFPRReg)
            m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
        return call;
    }
1065 | #else // CPU(X86_64) || (CPU(ARM_THUMB2) && CPU(ARM_HARDFP)) || CPU(ARM64) || CPU(MIPS) |
    // Hard-float ABIs: the double comes back in the return-value FPR; copy it to
    // |result| if the caller asked for a specific register.
    JITCompiler::Call appendCallSetResult(const FunctionPtr<CFunctionPtrTag> function, FPRReg result)
    {
        JITCompiler::Call call = appendCall(function);
        if (result != InvalidFPRReg)
            m_jit.moveDouble(FPRInfo::returnValueFPR, result);
        return call;
    }
1073 | #endif |
1074 | |
1075 | void branchDouble(JITCompiler::DoubleCondition cond, FPRReg left, FPRReg right, BasicBlock* destination) |
1076 | { |
1077 | return addBranch(m_jit.branchDouble(cond, left, right), destination); |
1078 | } |
1079 | |
1080 | void branchDoubleNonZero(FPRReg value, FPRReg scratch, BasicBlock* destination) |
1081 | { |
1082 | return addBranch(m_jit.branchDoubleNonZero(value, scratch), destination); |
1083 | } |
1084 | |
1085 | template<typename T, typename U> |
1086 | void branch32(JITCompiler::RelationalCondition cond, T left, U right, BasicBlock* destination) |
1087 | { |
1088 | return addBranch(m_jit.branch32(cond, left, right), destination); |
1089 | } |
1090 | |
1091 | template<typename T, typename U> |
1092 | void branchTest32(JITCompiler::ResultCondition cond, T value, U mask, BasicBlock* destination) |
1093 | { |
1094 | return addBranch(m_jit.branchTest32(cond, value, mask), destination); |
1095 | } |
1096 | |
1097 | template<typename T> |
1098 | void branchTest32(JITCompiler::ResultCondition cond, T value, BasicBlock* destination) |
1099 | { |
1100 | return addBranch(m_jit.branchTest32(cond, value), destination); |
1101 | } |
1102 | |
1103 | #if USE(JSVALUE64) |
1104 | template<typename T, typename U> |
1105 | void branch64(JITCompiler::RelationalCondition cond, T left, U right, BasicBlock* destination) |
1106 | { |
1107 | return addBranch(m_jit.branch64(cond, left, right), destination); |
1108 | } |
1109 | #endif |
1110 | |
1111 | template<typename T, typename U> |
1112 | void branch8(JITCompiler::RelationalCondition cond, T left, U right, BasicBlock* destination) |
1113 | { |
1114 | return addBranch(m_jit.branch8(cond, left, right), destination); |
1115 | } |
1116 | |
1117 | template<typename T, typename U> |
1118 | void branchPtr(JITCompiler::RelationalCondition cond, T left, U right, BasicBlock* destination) |
1119 | { |
1120 | return addBranch(m_jit.branchPtr(cond, left, right), destination); |
1121 | } |
1122 | |
1123 | template<typename T, typename U> |
1124 | void branchTestPtr(JITCompiler::ResultCondition cond, T value, U mask, BasicBlock* destination) |
1125 | { |
1126 | return addBranch(m_jit.branchTestPtr(cond, value, mask), destination); |
1127 | } |
1128 | |
1129 | template<typename T> |
1130 | void branchTestPtr(JITCompiler::ResultCondition cond, T value, BasicBlock* destination) |
1131 | { |
1132 | return addBranch(m_jit.branchTestPtr(cond, value), destination); |
1133 | } |
1134 | |
1135 | template<typename T, typename U> |
1136 | void branchTest8(JITCompiler::ResultCondition cond, T value, U mask, BasicBlock* destination) |
1137 | { |
1138 | return addBranch(m_jit.branchTest8(cond, value, mask), destination); |
1139 | } |
1140 | |
1141 | template<typename T> |
1142 | void branchTest8(JITCompiler::ResultCondition cond, T value, BasicBlock* destination) |
1143 | { |
1144 | return addBranch(m_jit.branchTest8(cond, value), destination); |
1145 | } |
1146 | |
    // Controls whether jump() may elide the branch when the destination is the
    // very next block in emission order.
    enum FallThroughMode {
        AtFallThroughPoint,
        ForceJump
    };
    // Unconditional jump to |destination|; omitted entirely when the destination
    // immediately follows and fall-through is permitted.
    void jump(BasicBlock* destination, FallThroughMode fallThroughMode = AtFallThroughPoint)
    {
        if (destination == nextBlock()
            && fallThroughMode == AtFallThroughPoint)
            return;
        addBranch(m_jit.jump(), destination);
    }
1158 | |
    // Queue a jump for later resolution; destinations may not have been emitted
    // yet, so all branch records are fixed up in linkBranches().
    void addBranch(const MacroAssembler::Jump& jump, BasicBlock* destination)
    {
        m_branches.append(BranchRecord(jump, destination));
    }
    void addBranch(const MacroAssembler::JumpList& jump, BasicBlock* destination);

    // Resolves every branch queued via addBranch().
    void linkBranches();

    void dump(const char* label = 0);
1168 | |
    // Heuristic: prefer the strict Int52 format unless the node's current
    // generation info is already in Int52 form.
    bool betterUseStrictInt52(Node* node)
    {
        return !generationInfo(node).isInt52();
    }
    bool betterUseStrictInt52(Edge edge)
    {
        return betterUseStrictInt52(edge.node());
    }
1177 | |
    // Comparison/equality codegen. The "PeepHole" variants fuse a compare with
    // an immediately following Branch node.
    bool compare(Node*, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_JITOperation_EJJ);
    void compileCompareUnsigned(Node*, MacroAssembler::RelationalCondition);
    bool compilePeepHoleBranch(Node*, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_JITOperation_EJJ);
    void compilePeepHoleInt32Branch(Node*, Node* branchNode, JITCompiler::RelationalCondition);
    void compilePeepHoleInt52Branch(Node*, Node* branchNode, JITCompiler::RelationalCondition);
    void compilePeepHoleBooleanBranch(Node*, Node* branchNode, JITCompiler::RelationalCondition);
    void compilePeepHoleDoubleBranch(Node*, Node* branchNode, JITCompiler::DoubleCondition);
    void compilePeepHoleObjectEquality(Node*, Node* branchNode);
    void compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode);
    void compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode);
    void compileObjectEquality(Node*);
    void compileObjectStrictEquality(Edge objectChild, Edge otherChild);
    void compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild);
    void compileObjectOrOtherLogicalNot(Edge value);
    void compileLogicalNot(Node*);
    void compileLogicalNotStringOrOther(Node*);
    void compileStringEquality(
        Node*, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR,
        GPRReg leftTempGPR, GPRReg rightTempGPR, GPRReg leftTemp2GPR,
        GPRReg rightTemp2GPR, const JITCompiler::JumpList& fastTrue,
        const JITCompiler::JumpList& fastSlow);
    void compileStringEquality(Node*);
    void compileStringIdentEquality(Node*);
    void compileStringToUntypedEquality(Node*, Edge stringEdge, Edge untypedEdge);
    void compileStringIdentToNotStringVarEquality(Node*, Edge stringEdge, Edge notStringVarEdge);
    void compileStringZeroLength(Node*);
    void compileMiscStrictEq(Node*);

    void compileSymbolEquality(Node*);
    void compileBigIntEquality(Node*);
    void compilePeepHoleSymbolEquality(Node*, Node* branchNode);
    void compileSymbolUntypedEquality(Node*, Edge symbolEdge, Edge untypedEdge);

    // Branch emission for Branch-style nodes over various speculated types.
    void emitObjectOrOtherBranch(Edge value, BasicBlock* taken, BasicBlock* notTaken);
    void emitStringBranch(Edge value, BasicBlock* taken, BasicBlock* notTaken);
    void emitStringOrOtherBranch(Edge value, BasicBlock* taken, BasicBlock* notTaken);
    void emitBranch(Node*);
1215 | |
    // A single (string, target block) case used when lowering switch-on-string;
    // cases are sorted so emitBinarySwitchStringRecurse can bisect them.
    struct StringSwitchCase {
        StringSwitchCase() { }

        StringSwitchCase(StringImpl* string, BasicBlock* target)
            : string(string)
            , target(target)
        {
        }

        // Orders cases by their string (see stringLessThan).
        bool operator<(const StringSwitchCase& other) const
        {
            return stringLessThan(*string, *other.string);
        }

        StringImpl* string;
        BasicBlock* target;
    };
1233 | |
    // Switch lowering: jump tables for int/char switches, recursive binary
    // search for string switches.
    void emitSwitchIntJump(SwitchData*, GPRReg value, GPRReg scratch);
    void emitSwitchImm(Node*, SwitchData*);
    void emitSwitchCharStringJump(SwitchData*, GPRReg value, GPRReg scratch);
    void emitSwitchChar(Node*, SwitchData*);
    void emitBinarySwitchStringRecurse(
        SwitchData*, const Vector<StringSwitchCase>&, unsigned numChecked,
        unsigned begin, unsigned end, GPRReg buffer, GPRReg length, GPRReg temp,
        unsigned alreadyCheckedLength, bool checkedExactLength);
    void emitSwitchStringOnString(SwitchData*, GPRReg string);
    void emitSwitchString(Node*, SwitchData*);
    void emitSwitch(Node*);

    void compileToStringOrCallStringConstructorOrStringValueOf(Node*);
    void compileNumberToStringWithRadix(Node*);
    void compileNumberToStringWithValidRadixConstant(Node*);
    void compileNumberToStringWithValidRadixConstant(Node*, int32_t radix);
    void compileNewStringObject(Node*);
    void compileNewSymbol(Node*);

    void compileNewTypedArrayWithSize(Node*);

    // Type-specialized compare nodes.
    void compileInt32Compare(Node*, MacroAssembler::RelationalCondition);
    void compileInt52Compare(Node*, MacroAssembler::RelationalCondition);
    void compileBooleanCompare(Node*, MacroAssembler::RelationalCondition);
    void compileDoubleCompare(Node*, MacroAssembler::DoubleCondition);
    void compileStringCompare(Node*, MacroAssembler::RelationalCondition);
    void compileStringIdentCompare(Node*, MacroAssembler::RelationalCondition);

    bool compileStrictEq(Node*);

    void compileSameValue(Node*);
1265 | |
1266 | void compileAllocatePropertyStorage(Node*); |
1267 | void compileReallocatePropertyStorage(Node*); |
1268 | void compileNukeStructureAndSetButterfly(Node*); |
1269 | void compileGetButterfly(Node*); |
1270 | void compileCallDOMGetter(Node*); |
1271 | void compileCallDOM(Node*); |
1272 | void compileCheckSubClass(Node*); |
1273 | void compileNormalizeMapKey(Node*); |
1274 | void compileGetMapBucketHead(Node*); |
1275 | void compileGetMapBucketNext(Node*); |
1276 | void compileSetAdd(Node*); |
1277 | void compileMapSet(Node*); |
1278 | void compileWeakMapGet(Node*); |
1279 | void compileWeakSetAdd(Node*); |
1280 | void compileWeakMapSet(Node*); |
1281 | void compileLoadKeyFromMapBucket(Node*); |
1282 | void compileLoadValueFromMapBucket(Node*); |
1283 | void (Node*); |
1284 | void compileGetPrototypeOf(Node*); |
1285 | void compileIdentity(Node*); |
1286 | |
1287 | #if USE(JSVALUE32_64) |
1288 | template<typename BaseOperandType, typename PropertyOperandType, typename ValueOperandType, typename TagType> |
1289 | void compileContiguousPutByVal(Node*, BaseOperandType&, PropertyOperandType&, ValueOperandType&, GPRReg valuePayloadReg, TagType valueTag); |
1290 | #endif |
1291 | void compileDoublePutByVal(Node*, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property); |
1292 | bool (ArrayMode arrayMode) |
1293 | { |
1294 | return arrayMode.mayStoreToHole(); |
1295 | } |
1296 | GPRReg temporaryRegisterForPutByVal(GPRTemporary&, ArrayMode); |
1297 | GPRReg temporaryRegisterForPutByVal(GPRTemporary& temporary, Node* node) |
1298 | { |
1299 | return temporaryRegisterForPutByVal(temporary, node->arrayMode()); |
1300 | } |
1301 | |
    // String / arguments element access.
    void compileGetCharCodeAt(Node*);
    void compileGetByValOnString(Node*);
    void compileFromCharCode(Node*);

    void compileGetByValOnDirectArguments(Node*);
    void compileGetByValOnScopedArguments(Node*);

    void compileGetScope(Node*);
    void compileSkipScope(Node*);
    void compileGetGlobalObject(Node*);
    void compileGetGlobalThis(Node*);

    void compileGetArrayLength(Node*);

    void compileCheckTypeInfoFlags(Node*);
    void compileCheckStringIdent(Node*);

    void compileParseInt(Node*);

    void compileValueRep(Node*);
    void compileDoubleRep(Node*);

    void compileValueToInt32(Node*);
    void compileUInt32ToNumber(Node*);
    void compileDoubleAsInt32(Node*);

    void compileValueBitNot(Node*);
    void compileBitwiseNot(Node*);

    // Bit/shift operations; "untyped" emitters handle operands not proven to be ints.
    template<typename SnippetGenerator, J_JITOperation_EJJ slowPathFunction>
    void emitUntypedBitOp(Node*);
    void compileBitwiseOp(Node*);
    void compileValueBitwiseOp(Node*);

    void emitUntypedRightShiftBitOp(Node*);
    void compileShiftOp(Node*);

    // Arithmetic lowered through repatching math ICs.
    template <typename Generator, typename RepatchingFunction, typename NonRepatchingFunction>
    void compileMathIC(Node*, JITBinaryMathIC<Generator>*, bool needsScratchGPRReg, bool needsScratchFPRReg, RepatchingFunction, NonRepatchingFunction);
    template <typename Generator, typename RepatchingFunction, typename NonRepatchingFunction>
    void compileMathIC(Node*, JITUnaryMathIC<Generator>*, bool needsScratchGPRReg, RepatchingFunction, NonRepatchingFunction);

    void compileArithDoubleUnaryOp(Node*, double (*doubleFunction)(double), double (*operation)(ExecState*, EncodedJSValue));
    void compileValueAdd(Node*);
    void compileValueSub(Node*);
    void compileArithAdd(Node*);
    void compileMakeRope(Node*);
    void compileArithAbs(Node*);
    void compileArithClz32(Node*);
    void compileArithSub(Node*);
    void compileValueNegate(Node*);
    void compileArithNegate(Node*);
    void compileValueMul(Node*);
    void compileArithMul(Node*);
    void compileValueDiv(Node*);
    void compileArithDiv(Node*);
    void compileArithFRound(Node*);
    void compileValueMod(Node*);
    void compileArithMod(Node*);
    void compileArithPow(Node*);
    void compileValuePow(Node*);
    void compileArithRounding(Node*);
    void compileArithRandom(Node*);
    void compileArithUnary(Node*);
    void compileArithSqrt(Node*);
    void compileArithMinMax(Node*);
    void compileConstantStoragePointer(Node*);
    void compileGetIndexedPropertyStorage(Node*);
    // Typed-array access with bounds/neutering checks.
    JITCompiler::Jump jumpForTypedArrayOutOfBounds(Node*, GPRReg baseGPR, GPRReg indexGPR);
    JITCompiler::Jump jumpForTypedArrayIsNeuteredIfOutOfBounds(Node*, GPRReg baseGPR, JITCompiler::Jump outOfBounds);
    void emitTypedArrayBoundsCheck(Node*, GPRReg baseGPR, GPRReg indexGPR);
    void compileGetTypedArrayByteOffset(Node*);
    void compileGetByValOnIntTypedArray(Node*, TypedArrayType);
    void compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node*, TypedArrayType);
    void compileGetByValOnFloatTypedArray(Node*, TypedArrayType);
    void compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node*, TypedArrayType);
    void compileGetByValForObjectWithString(Node*);
    void compileGetByValForObjectWithSymbol(Node*);
    void compilePutByValForCellWithString(Node*, Edge& child1, Edge& child2, Edge& child3);
    void compilePutByValForCellWithSymbol(Node*, Edge& child1, Edge& child2, Edge& child3);
    void compileGetByValWithThis(Node*);
    void compileGetByOffset(Node*);
    void compilePutByOffset(Node*);
    void compileMatchStructure(Node*);
    // If this returns false it means that we terminated speculative execution.
    bool getIntTypedArrayStoreOperand(
        GPRTemporary& value,
        GPRReg property,
#if USE(JSVALUE32_64)
        GPRTemporary& propertyTag,
        GPRTemporary& valueTag,
#endif
        Edge valueUse, JITCompiler::JumpList& slowPathCases, bool isClamped = false);
    void loadFromIntTypedArray(GPRReg storageReg, GPRReg propertyReg, GPRReg resultReg, TypedArrayType);
    void setIntTypedArrayLoadResult(Node*, GPRReg resultReg, TypedArrayType, bool canSpeculate = false);
    template <typename ClassType> void compileNewFunctionCommon(GPRReg, RegisteredStructure, GPRReg, GPRReg, GPRReg, MacroAssembler::JumpList&, size_t, FunctionExecutable*);
    void compileNewFunction(Node*);
    void compileSetFunctionName(Node*);
    void compileNewRegexp(Node*);
    // Varargs / arguments-object lowering.
    void compileForwardVarargs(Node*);
    void compileLoadVarargs(Node*);
    void compileCreateActivation(Node*);
    void compileCreateDirectArguments(Node*);
    void compileGetFromArguments(Node*);
    void compilePutToArguments(Node*);
    void compileGetArgument(Node*);
    void compileCreateScopedArguments(Node*);
    void compileCreateClonedArguments(Node*);
    void compileCreateRest(Node*);
    void compileSpread(Node*);
    void compileNewArray(Node*);
    void compileNewArrayWithSpread(Node*);
    void compileGetRestLength(Node*);
    void compileArraySlice(Node*);
    void compileArrayIndexOf(Node*);
    void compileArrayPush(Node*);
    void compileNotifyWrite(Node*);
    // RegExp intrinsics.
    void compileRegExpExec(Node*);
    void compileRegExpExecNonGlobalOrSticky(Node*);
    void compileRegExpMatchFast(Node*);
    void compileRegExpMatchFastGlobal(Node*);
    void compileRegExpTest(Node*);
    void compileStringReplace(Node*);
    void compileIsObject(Node*);
    void compileIsObjectOrNull(Node*);
    void compileIsFunction(Node*);
    void compileTypeOf(Node*);
    void compileCheckCell(Node*);
    void compileCheckNotEmpty(Node*);
    void compileCheckStructure(Node*);
    void emitStructureCheck(Node*, GPRReg cellGPR, GPRReg tempGPR);
    void compilePutAccessorById(Node*);
    void compilePutGetterSetterById(Node*);
    void compilePutAccessorByVal(Node*);
    void compileGetRegExpObjectLastIndex(Node*);
    void compileSetRegExpObjectLastIndex(Node*);
    void compileLazyJSConstant(Node*);
    void compileMaterializeNewObject(Node*);
    void compileRecordRegExpCachedResult(Node*);
    void compileToObjectOrCallObjectConstructor(Node*);
    // Scope/variable resolution.
    void compileResolveScope(Node*);
    void compileResolveScopeForHoistingFuncDeclInEval(Node*);
    void compileGetGlobalVariable(Node*);
    void compilePutGlobalVariable(Node*);
    void compileGetDynamicVar(Node*);
    void compilePutDynamicVar(Node*);
    void compileGetClosureVar(Node*);
    void compilePutClosureVar(Node*);
    void compileCompareEqPtr(Node*);
    void compileDefineDataProperty(Node*);
    void compileDefineAccessorProperty(Node*);
    void compileStringSlice(Node*);
    void compileToLowerCase(Node*);
    void compileThrow(Node*);
    void compileThrowStaticError(Node*);
    // For-in enumeration support.
    void compileGetEnumerableLength(Node*);
    void compileHasGenericProperty(Node*);
    void compileToIndexString(Node*);
    void compilePutByIdFlush(Node*);
    void compilePutById(Node*);
    void compilePutByIdDirect(Node*);
    void compilePutByIdWithThis(Node*);
    void compileHasStructureProperty(Node*);
    void compileGetDirectPname(Node*);
    void compileGetPropertyEnumerator(Node*);
    void compileGetEnumeratorPname(Node*);
    void compileGetExecutable(Node*);
    void compileGetGetter(Node*);
    void compileGetSetter(Node*);
    void compileGetCallee(Node*);
    void compileSetCallee(Node*);
    void compileGetArgumentCountIncludingThis(Node*);
    void compileSetArgumentCountIncludingThis(Node*);
    void compileStrCat(Node*);
    void compileNewArrayBuffer(Node*);
    void compileNewArrayWithSize(Node*);
    void compileNewTypedArray(Node*);
    void compileToThis(Node*);
    void compileObjectKeys(Node*);
    void compileObjectCreate(Node*);
    void compileCreateThis(Node*);
    void compileNewObject(Node*);
    void compileToPrimitive(Node*);
    void compileLogShadowChickenPrologue(Node*);
    void compileLogShadowChickenTail(Node*);
1487 | void compileHasIndexedProperty(Node*); |
1488 | void (Node*); |
1489 | void compileClearCatchLocals(Node*); |
1490 | void compileProfileType(Node*); |
1491 | |
    // Boolean materialization helpers: load canonical true/false into a GPR, or
    // turn a raw 0/1 into a tagged JSValue boolean ("blessing").
    void moveTrueTo(GPRReg);
    void moveFalseTo(GPRReg);
    void blessBoolean(GPRReg);
1495 | |
    // Allocator for a cell of a specific size. Fast inline allocation; slow
    // cases are appended to |slowPath|.
    template <typename StructureType> // StructureType can be GPR or ImmPtr.
    void emitAllocateJSCell(
        GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, StructureType structure,
        GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
    {
        m_jit.emitAllocateJSCell(resultGPR, allocator, allocatorGPR, structure, scratchGPR, slowPath);
    }
1504 | |
    // Allocator for an object of a specific size; also installs the butterfly
    // (|storage|). Slow cases are appended to |slowPath|.
    template <typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr.
    void emitAllocateJSObject(
        GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, StructureType structure,
        StorageType storage, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
    {
        m_jit.emitAllocateJSObject(
            resultGPR, allocator, allocatorGPR, structure, storage, scratchGPR, slowPath);
    }
1514 | |
    // Allocate an object of class |ClassType| whose byte size is known statically.
    template <typename ClassType, typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr.
    void emitAllocateJSObjectWithKnownSize(
        GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1,
        GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath, size_t size)
    {
        m_jit.emitAllocateJSObjectWithKnownSize<ClassType>(*m_jit.vm(), resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath, size);
    }
1522 | |
    // Convenience allocator for a built-in object (size derived from ClassType).
    template <typename ClassType, typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr.
    void emitAllocateJSObject(GPRReg resultGPR, StructureType structure, StorageType storage,
        GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
    {
        m_jit.emitAllocateJSObject<ClassType>(*m_jit.vm(), resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath);
    }
1530 | |
    // Allocate an object whose size is only known at runtime (|allocationSize|).
    template <typename ClassType, typename StructureType> // StructureType can be GPR or ImmPtr.
    void emitAllocateVariableSizedJSObject(GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
    {
        m_jit.emitAllocateVariableSizedJSObject<ClassType>(*m_jit.vm(), resultGPR, structure, allocationSize, scratchGPR1, scratchGPR2, slowPath);
    }
1536 | |
    // Allocates an object whose class requires destruction. Takes a
    // RegisteredStructure (an already-registered, known structure), so the raw
    // Structure* can be baked in via structure.get().
    template<typename ClassType>
    void emitAllocateDestructibleObject(GPRReg resultGPR, RegisteredStructure structure,
        GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
    {
        m_jit.emitAllocateDestructibleObject<ClassType>(*m_jit.vm(), resultGPR, structure.get(), scratchGPR1, scratchGPR2, slowPath);
    }
1543 | |
    // Allocates an object together with its indexed storage (numElements
    // initialized elements in a vector of vectorLength slots). Defined out of line.
    void emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength);

    // Materialize argument count, callee, or argument start for the given
    // (possibly inlined) frame, identified by InlineCallFrame or CodeOrigin.
    void emitGetLength(InlineCallFrame*, GPRReg lengthGPR, bool includeThis = false);
    void emitGetLength(CodeOrigin, GPRReg lengthGPR, bool includeThis = false);
    void emitGetCallee(CodeOrigin, GPRReg calleeGPR);
    void emitGetArgumentStart(CodeOrigin, GPRReg startGPR);
    void emitPopulateSliceIndex(Edge&, Optional<GPRReg> indexGPR, GPRReg lengthGPR, GPRReg resultGPR);

    // Generate an OSR exit fuzz check. Returns Jump() if OSR exit fuzz is not enabled, or if
    // it's in training mode.
    MacroAssembler::Jump emitOSRExitFuzzCheck();

    // Add a speculation check: the supplied jump(s) are wired to an OSR exit of
    // the given ExitKind, recovering the value from JSValueSource.
    void speculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail);
    void speculationCheck(ExitKind, JSValueSource, Node*, const MacroAssembler::JumpList& jumpsToFail);

    // Add a speculation check without additional recovery, and with a promise to supply a jump later.
    OSRExitJumpPlaceholder speculationCheck(ExitKind, JSValueSource, Node*);
    OSRExitJumpPlaceholder speculationCheck(ExitKind, JSValueSource, Edge);
    void speculationCheck(ExitKind, JSValueSource, Edge, MacroAssembler::Jump jumpToFail);
    void speculationCheck(ExitKind, JSValueSource, Edge, const MacroAssembler::JumpList& jumpsToFail);
    // Add a speculation check with additional recovery.
    void speculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);
    void speculationCheck(ExitKind, JSValueSource, Edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);

    // Emit a point at which this code block can be invalidated (e.g. by watchpoints firing).
    void emitInvalidationPoint(Node*);

    // Emit code for a node that the compiler has proven can never be reached.
    void unreachable(Node*);

    // Called when we statically determine that a speculation will fail.
    void terminateSpeculativeExecution(ExitKind, JSValueRegs, Node*);
    void terminateSpeculativeExecution(ExitKind, JSValueRegs, Edge);

    // Helpers for performing type checks on an edge stored in the given registers.
    // needsTypeCheck() consults the abstract interpreter; typeCheck() records the
    // proven type and wires jumpToFail to an OSR exit.
    bool needsTypeCheck(Edge edge, SpeculatedType typesPassedThrough) { return m_interpreter.needsTypeCheck(edge, typesPassedThrough); }
    void typeCheck(JSValueSource, Edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind = BadType);

    void speculateCellTypeWithoutTypeFiltering(Edge, GPRReg cellGPR, JSType);
    void speculateCellType(Edge, GPRReg cellGPR, SpeculatedType, JSType);
1583 | |
    // Type speculations. Each speculate* helper emits a check that the edge's
    // value has the stated type and OSR-exits on failure. Overloads taking a
    // register operate on a value that has already been loaded.
    void speculateInt32(Edge);
#if USE(JSVALUE64)
    void convertAnyInt(Edge, GPRReg resultGPR);
    void speculateAnyInt(Edge);
    void speculateInt32(Edge, JSValueRegs);
    void speculateDoubleRepAnyInt(Edge);
#endif // USE(JSVALUE64)
    void speculateNumber(Edge);
    void speculateRealNumber(Edge);
    void speculateDoubleRepReal(Edge);
    void speculateBoolean(Edge);
    void speculateCell(Edge);
    void speculateCellOrOther(Edge);
    void speculateObject(Edge, GPRReg cell);
    void speculateObject(Edge);
    void speculateArray(Edge, GPRReg cell);
    void speculateArray(Edge);
    void speculateFunction(Edge, GPRReg cell);
    void speculateFunction(Edge);
    void speculateFinalObject(Edge, GPRReg cell);
    void speculateFinalObject(Edge);
    void speculateRegExpObject(Edge, GPRReg cell);
    void speculateRegExpObject(Edge);
    void speculateProxyObject(Edge, GPRReg cell);
    void speculateProxyObject(Edge);
    void speculateDerivedArray(Edge, GPRReg cell);
    void speculateDerivedArray(Edge);
    void speculateMapObject(Edge);
    void speculateMapObject(Edge, GPRReg cell);
    void speculateSetObject(Edge);
    void speculateSetObject(Edge, GPRReg cell);
    void speculateWeakMapObject(Edge);
    void speculateWeakMapObject(Edge, GPRReg cell);
    void speculateWeakSetObject(Edge);
    void speculateWeakSetObject(Edge, GPRReg cell);
    void speculateDataViewObject(Edge);
    void speculateDataViewObject(Edge, GPRReg cell);
    void speculateObjectOrOther(Edge);
    void speculateString(Edge edge, GPRReg cell);
    void speculateStringIdentAndLoadStorage(Edge edge, GPRReg string, GPRReg storage);
    void speculateStringIdent(Edge edge, GPRReg string);
    void speculateStringIdent(Edge);
    void speculateString(Edge);
    void speculateStringOrOther(Edge, JSValueRegs, GPRReg scratch);
    void speculateStringOrOther(Edge);
    void speculateNotStringVar(Edge);
    void speculateNotSymbol(Edge);
    void speculateStringObject(Edge, GPRReg);
    void speculateStringObject(Edge);
    void speculateStringOrStringObject(Edge);
    void speculateSymbol(Edge, GPRReg cell);
    void speculateSymbol(Edge);
    void speculateBigInt(Edge, GPRReg cell);
    void speculateBigInt(Edge);
    void speculateNotCell(Edge, JSValueRegs);
    void speculateNotCell(Edge);
    void speculateOther(Edge, JSValueRegs, GPRReg temp);
    void speculateOther(Edge, JSValueRegs);
    void speculateOther(Edge);
    void speculateMisc(Edge, JSValueRegs);
    void speculateMisc(Edge);
    // Dispatches to the appropriate speculate* helper based on the edge's use kind.
    void speculate(Node*, Edge);

    // Array-mode checks: branch to the slow path when the indexing type in
    // tempWithIndexingTypeReg does not match the expected ArrayMode.
    JITCompiler::JumpList jumpSlowForUnwantedArrayMode(GPRReg tempWithIndexingTypeReg, ArrayMode);
    void checkArray(Node*);
    void arrayify(Node*, GPRReg baseReg, GPRReg propertyReg);
    void arrayify(Node*);

    // Shared implementation behind fillSpeculateInt32 / fillSpeculateInt32Strict.
    template<bool strict>
    GPRReg fillSpeculateInt32Internal(Edge, DataFormat& returnFormat);

    // Applies Gigacage pointer caging to a typed-array storage pointer.
    void cageTypedArrayStorage(GPRReg, GPRReg);
1656 | |
1657 | void recordSetLocal( |
1658 | VirtualRegister bytecodeReg, VirtualRegister machineReg, DataFormat format) |
1659 | { |
1660 | m_stream->appendAndLog(VariableEvent::setLocal(bytecodeReg, machineReg, format)); |
1661 | } |
1662 | |
1663 | void recordSetLocal(DataFormat format) |
1664 | { |
1665 | VariableAccessData* variable = m_currentNode->variableAccessData(); |
1666 | recordSetLocal(variable->local(), variable->machineLocal(), format); |
1667 | } |
1668 | |
1669 | GenerationInfo& generationInfoFromVirtualRegister(VirtualRegister virtualRegister) |
1670 | { |
1671 | return m_generationInfo[virtualRegister.toLocal()]; |
1672 | } |
1673 | |
1674 | GenerationInfo& generationInfo(Node* node) |
1675 | { |
1676 | return generationInfoFromVirtualRegister(node->virtualRegister()); |
1677 | } |
1678 | |
1679 | GenerationInfo& generationInfo(Edge edge) |
1680 | { |
1681 | return generationInfo(edge.node()); |
1682 | } |
1683 | |
    // The JIT compiler we emit code with; it also provides MacroAssembler functionality.
    JITCompiler& m_jit;
    Graph& m_graph;

    // The current node being generated, and its position within the block.
    BasicBlock* m_block;
    Node* m_currentNode;
    NodeType m_lastGeneratedNode;
    unsigned m_indexInBlock;

    // Virtual and physical register maps.
    Vector<GenerationInfo, 32> m_generationInfo;
    RegisterBank<GPRInfo> m_gprs;
    RegisterBank<FPRInfo> m_fprs;

    // It is possible, during speculative generation, to reach a situation in which we
    // can statically determine a speculation will fail (for example, when two nodes
    // will make conflicting speculations about the same operand). In such cases this
    // flag is cleared, indicating no further code generation should take place.
    bool m_compileOkay;

    // Labels for blocks that serve as OSR entry points into this code.
    Vector<MacroAssembler::Label> m_osrEntryHeads;

    // An emitted jump together with the basic block it targets.
    struct BranchRecord {
        BranchRecord(MacroAssembler::Jump jump, BasicBlock* destination)
            : jump(jump)
            , destination(destination)
        {
        }

        MacroAssembler::Jump jump;
        BasicBlock* destination;
    };
    Vector<BranchRecord, 8> m_branches;

    // Semantic/forExit origin of the node currently being generated.
    NodeOrigin m_origin;

    // Abstract interpretation state, advanced in lockstep with code generation
    // (see needsTypeCheck(), which consults m_interpreter).
    InPlaceAbstractState m_state;
    AbstractInterpreter<InPlaceAbstractState> m_interpreter;

    // Stream of variable events (see recordSetLocal()) and the minified graph,
    // both consumed by the OSR-exit machinery.
    VariableEventStream* m_stream;
    MinifiedGraph* m_minifiedGraph;

    // Deferred slow-path code, emitted after the main path: full generator
    // objects, plus lightweight lambda-based slow paths with the node and
    // variable-event stream index at which they were requested.
    Vector<std::unique_ptr<SlowPathGenerator>, 8> m_slowPathGenerators;
    struct SlowPathLambda {
        Function<void()> generator;
        Node* currentNode;
        unsigned streamIndex;
    };
    Vector<SlowPathLambda> m_slowPathLambdas;
    Vector<SilentRegisterSavePlan> m_plans;
    Optional<unsigned> m_outOfLineStreamIndex;
1736 | }; |
1737 | |
1738 | |
1739 | // === Operand types === |
1740 | // |
1741 | // These classes are used to lock the operands to a node into machine |
// registers. These classes implement a pattern of locking a value
1743 | // into register at the point of construction only if it is already in |
1744 | // registers, and otherwise loading it lazily at the point it is first |
1745 | // used. We do so in order to attempt to avoid spilling one operand |
1746 | // in order to make space available for another. |
1747 | |
// Locks a node's JSValue into machine registers. On 64-bit targets the value
// occupies a single GPR; on 32-bit targets it is either a tag/payload GPR pair
// or, when the value was filled as a double, an FPR (tracked by m_isDouble).
class JSValueOperand {
public:
    explicit JSValueOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
        : m_jit(jit)
        , m_edge(edge)
#if USE(JSVALUE64)
        , m_gprOrInvalid(InvalidGPRReg)
#elif USE(JSVALUE32_64)
        , m_isDouble(false)
#endif
    {
        ASSERT(m_jit);
        // An empty edge yields an inert operand that locks nothing.
        if (!edge)
            return;
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == UntypedUse);
#if USE(JSVALUE64)
        // Lock eagerly if the value already lives in a register; otherwise the
        // fill happens lazily on first gpr()/fill() access.
        if (jit->isFilled(node()))
            gpr();
#elif USE(JSVALUE32_64)
        m_register.pair.tagGPR = InvalidGPRReg;
        m_register.pair.payloadGPR = InvalidGPRReg;
        if (jit->isFilled(node()))
            fill();
#endif
    }

    // Move construction transfers register ownership; the source is left with
    // an empty edge so its destructor unlocks nothing.
    explicit JSValueOperand(JSValueOperand&& other)
        : m_jit(other.m_jit)
        , m_edge(other.m_edge)
    {
#if USE(JSVALUE64)
        m_gprOrInvalid = other.m_gprOrInvalid;
#elif USE(JSVALUE32_64)
        m_register.pair.tagGPR = InvalidGPRReg;
        m_register.pair.payloadGPR = InvalidGPRReg;
        m_isDouble = other.m_isDouble;

        if (m_edge) {
            if (m_isDouble)
                m_register.fpr = other.m_register.fpr;
            else
                m_register.pair = other.m_register.pair;
        }
#endif
        other.m_edge = Edge();
#if USE(JSVALUE64)
        other.m_gprOrInvalid = InvalidGPRReg;
#elif USE(JSVALUE32_64)
        other.m_isDouble = false;
#endif
    }

    // Unlocks whichever register(s) this operand holds, if any.
    ~JSValueOperand()
    {
        if (!m_edge)
            return;
#if USE(JSVALUE64)
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
#elif USE(JSVALUE32_64)
        if (m_isDouble) {
            ASSERT(m_register.fpr != InvalidFPRReg);
            m_jit->unlock(m_register.fpr);
        } else {
            ASSERT(m_register.pair.tagGPR != InvalidGPRReg && m_register.pair.payloadGPR != InvalidGPRReg);
            m_jit->unlock(m_register.pair.tagGPR);
            m_jit->unlock(m_register.pair.payloadGPR);
        }
#endif
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

#if USE(JSVALUE64)
    // Fills the value into a GPR on first use and locks it.
    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillJSValue(m_edge);
        return m_gprOrInvalid;
    }
    JSValueRegs jsValueRegs()
    {
        return JSValueRegs(gpr());
    }
#elif USE(JSVALUE32_64)
    bool isDouble() { return m_isDouble; }

    // Fills the value on first use; fillJSValue() returns false when the value
    // was filled as a double (into m_register.fpr) rather than a GPR pair.
    void fill()
    {
        if (m_register.pair.tagGPR == InvalidGPRReg && m_register.pair.payloadGPR == InvalidGPRReg)
            m_isDouble = !m_jit->fillJSValue(m_edge, m_register.pair.tagGPR, m_register.pair.payloadGPR, m_register.fpr);
    }

    GPRReg tagGPR()
    {
        fill();
        ASSERT(!m_isDouble);
        return m_register.pair.tagGPR;
    }

    GPRReg payloadGPR()
    {
        fill();
        ASSERT(!m_isDouble);
        return m_register.pair.payloadGPR;
    }

    JSValueRegs jsValueRegs()
    {
        return JSValueRegs(tagGPR(), payloadGPR());
    }

    GPRReg gpr(WhichValueWord which)
    {
        return jsValueRegs().gpr(which);
    }

    FPRReg fpr()
    {
        fill();
        ASSERT(m_isDouble);
        return m_register.fpr;
    }
#endif

    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
#if USE(JSVALUE64)
    GPRReg m_gprOrInvalid;
#elif USE(JSVALUE32_64)
    union {
        struct {
            GPRReg tagGPR;
            GPRReg payloadGPR;
        } pair;
        FPRReg fpr;
    } m_register;
    bool m_isDouble;
#endif
};
1902 | |
1903 | class StorageOperand { |
1904 | public: |
1905 | explicit StorageOperand(SpeculativeJIT* jit, Edge edge) |
1906 | : m_jit(jit) |
1907 | , m_edge(edge) |
1908 | , m_gprOrInvalid(InvalidGPRReg) |
1909 | { |
1910 | ASSERT(m_jit); |
1911 | ASSERT(edge.useKind() == UntypedUse || edge.useKind() == KnownCellUse); |
1912 | if (jit->isFilled(node())) |
1913 | gpr(); |
1914 | } |
1915 | |
1916 | ~StorageOperand() |
1917 | { |
1918 | ASSERT(m_gprOrInvalid != InvalidGPRReg); |
1919 | m_jit->unlock(m_gprOrInvalid); |
1920 | } |
1921 | |
1922 | Edge edge() const |
1923 | { |
1924 | return m_edge; |
1925 | } |
1926 | |
1927 | Node* node() const |
1928 | { |
1929 | return edge().node(); |
1930 | } |
1931 | |
1932 | GPRReg gpr() |
1933 | { |
1934 | if (m_gprOrInvalid == InvalidGPRReg) |
1935 | m_gprOrInvalid = m_jit->fillStorage(edge()); |
1936 | return m_gprOrInvalid; |
1937 | } |
1938 | |
1939 | void use() |
1940 | { |
1941 | m_jit->use(node()); |
1942 | } |
1943 | |
1944 | private: |
1945 | SpeculativeJIT* m_jit; |
1946 | Edge m_edge; |
1947 | GPRReg m_gprOrInvalid; |
1948 | }; |
1949 | |
1950 | |
1951 | // === Temporaries === |
1952 | // |
1953 | // These classes are used to allocate temporary registers. |
1954 | // A mechanism is provided to attempt to reuse the registers |
1955 | // currently allocated to child nodes whose value is consumed |
1956 | // by, and not live after, this operation. |
1957 | |
// Tag type selecting the register-reusing constructor overloads below.
enum ReuseTag { Reuse };
1959 | |
// A scratch GPR, unlocked on destruction. The Reuse-tagged constructors try to
// take over the register of an operand whose value is dead after this node.
class GPRTemporary {
public:
    GPRTemporary();
    GPRTemporary(SpeculativeJIT*);
    GPRTemporary(SpeculativeJIT*, GPRReg specific);
    // Reuses operand's register when its node's value is no longer needed;
    // otherwise allocates a fresh register.
    template<typename T>
    GPRTemporary(SpeculativeJIT* jit, ReuseTag, T& operand)
        : m_jit(jit)
        , m_gpr(InvalidGPRReg)
    {
        if (m_jit->canReuse(operand.node()))
            m_gpr = m_jit->reuse(operand.gpr());
        else
            m_gpr = m_jit->allocate();
    }
    // Two-operand form: prefers reusing op1, then op2; the third case handles
    // both operands sharing one register, which is reusable only when both
    // nodes' values are dead together.
    template<typename T1, typename T2>
    GPRTemporary(SpeculativeJIT* jit, ReuseTag, T1& op1, T2& op2)
        : m_jit(jit)
        , m_gpr(InvalidGPRReg)
    {
        if (m_jit->canReuse(op1.node()))
            m_gpr = m_jit->reuse(op1.gpr());
        else if (m_jit->canReuse(op2.node()))
            m_gpr = m_jit->reuse(op2.gpr());
        else if (m_jit->canReuse(op1.node(), op2.node()) && op1.gpr() == op2.gpr())
            m_gpr = m_jit->reuse(op1.gpr());
        else
            m_gpr = m_jit->allocate();
    }
    GPRTemporary(SpeculativeJIT*, ReuseTag, JSValueOperand&, WhichValueWord);

    GPRTemporary(GPRTemporary& other) = delete;

    // Move construction/assignment transfer register ownership; the source is
    // left with a null m_jit so its destructor unlocks nothing.
    GPRTemporary(GPRTemporary&& other)
    {
        ASSERT(other.m_jit);
        ASSERT(other.m_gpr != InvalidGPRReg);
        m_jit = other.m_jit;
        m_gpr = other.m_gpr;
        other.m_jit = nullptr;
        other.m_gpr = InvalidGPRReg;
    }

    GPRTemporary& operator=(GPRTemporary&& other)
    {
        // Only assignment into an empty (default-constructed or moved-from)
        // temporary is supported.
        ASSERT(!m_jit);
        ASSERT(m_gpr == InvalidGPRReg);
        std::swap(m_jit, other.m_jit);
        std::swap(m_gpr, other.m_gpr);
        return *this;
    }

    void adopt(GPRTemporary&);

    ~GPRTemporary()
    {
        if (m_jit && m_gpr != InvalidGPRReg)
            m_jit->unlock(gpr());
    }

    GPRReg gpr()
    {
        return m_gpr;
    }

private:
    SpeculativeJIT* m_jit;
    GPRReg m_gpr;
};
2029 | |
// Temporary register(s) wide enough to hold a JSValue: a single GPR on 64-bit
// targets, a tag/payload GPR pair on 32-bit targets. Constructors and regs()
// are defined out of line.
class JSValueRegsTemporary {
public:
    JSValueRegsTemporary();
    JSValueRegsTemporary(SpeculativeJIT*);
    // Reuse-tagged forms: attempt to take over the operand's register(s);
    // on 32-bit targets, resultRegWord selects which word to reuse.
    template<typename T>
    JSValueRegsTemporary(SpeculativeJIT*, ReuseTag, T& operand, WhichValueWord resultRegWord = PayloadWord);
    JSValueRegsTemporary(SpeculativeJIT*, ReuseTag, JSValueOperand&);
    ~JSValueRegsTemporary();

    JSValueRegs regs();

private:
#if USE(JSVALUE64)
    GPRTemporary m_gpr;
#else
    GPRTemporary m_payloadGPR;
    GPRTemporary m_tagGPR;
#endif
};
2049 | |
// A scratch FPR, unlocked on destruction. Operand-taking constructors may
// reuse the operand's register (definitions are out of line).
class FPRTemporary {
    WTF_MAKE_NONCOPYABLE(FPRTemporary);
public:
    FPRTemporary(FPRTemporary&&);
    FPRTemporary(SpeculativeJIT*);
    FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&);
    FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&, SpeculateDoubleOperand&);
#if USE(JSVALUE32_64)
    FPRTemporary(SpeculativeJIT*, JSValueOperand&);
#endif

    ~FPRTemporary()
    {
        // m_jit is null only when this temporary has been moved from.
        if (LIKELY(m_jit))
            m_jit->unlock(fpr());
    }

    FPRReg fpr() const
    {
        ASSERT(m_jit);
        ASSERT(m_fpr != InvalidFPRReg);
        return m_fpr;
    }

protected:
    // For subclasses (e.g. FPRResult) that already hold a locked register.
    FPRTemporary(SpeculativeJIT* jit, FPRReg lockedFPR)
        : m_jit(jit)
        , m_fpr(lockedFPR)
    {
    }

private:
    SpeculativeJIT* m_jit;
    FPRReg m_fpr;
};
2085 | |
2086 | |
2087 | // === Results === |
2088 | // |
2089 | // These classes lock the result of a call to a C++ helper function. |
2090 | |
// Locks the GPR in which a C++ helper call returns its (first) result word.
class GPRFlushedCallResult : public GPRTemporary {
public:
    GPRFlushedCallResult(SpeculativeJIT* jit)
        : GPRTemporary(jit, GPRInfo::returnValueGPR)
    {
    }
};
2098 | |
#if USE(JSVALUE32_64)
// Locks the second return-value GPR, used for the tag word of a returned
// JSValue on 32-bit targets (see JSValueRegsFlushedCallResult below).
class GPRFlushedCallResult2 : public GPRTemporary {
public:
    GPRFlushedCallResult2(SpeculativeJIT* jit)
        : GPRTemporary(jit, GPRInfo::returnValueGPR2)
    {
    }
};
#endif
2108 | |
// Locks the FPR in which a C++ helper call returns a double result.
class FPRResult : public FPRTemporary {
public:
    FPRResult(SpeculativeJIT* jit)
        : FPRTemporary(jit, lockedResult(jit))
    {
    }

private:
    // Locks the return-value FPR before handing it to the protected
    // FPRTemporary constructor, which expects an already-locked register.
    static FPRReg lockedResult(SpeculativeJIT* jit)
    {
        jit->lock(FPRInfo::returnValueFPR);
        return FPRInfo::returnValueFPR;
    }
};
2123 | |
// Locks the register(s) in which a C++ helper call returns a JSValue: one GPR
// on 64-bit targets, the payload/tag return GPR pair on 32-bit targets.
class JSValueRegsFlushedCallResult {
public:
    JSValueRegsFlushedCallResult(SpeculativeJIT* jit)
#if USE(JSVALUE64)
        : m_gpr(jit)
#else
        : m_payloadGPR(jit)
        , m_tagGPR(jit)
#endif
    {
    }

    JSValueRegs regs()
    {
#if USE(JSVALUE64)
        return JSValueRegs { m_gpr.gpr() };
#else
        // JSValueRegs' 32-bit constructor takes (tag, payload).
        return JSValueRegs { m_tagGPR.gpr(), m_payloadGPR.gpr() };
#endif
    }

private:
#if USE(JSVALUE64)
    GPRFlushedCallResult m_gpr;
#else
    GPRFlushedCallResult m_payloadGPR;
    GPRFlushedCallResult2 m_tagGPR;
#endif
};
2153 | |
2154 | |
2155 | // === Speculative Operand types === |
2156 | // |
2157 | // SpeculateInt32Operand, SpeculateStrictInt32Operand and SpeculateCellOperand. |
2158 | // |
2159 | // These are used to lock the operands to a node into machine registers within the |
2160 | // SpeculativeJIT. The classes operate like those above, however these will |
2161 | // perform a speculative check for a more restrictive type than we can statically |
2162 | // determine the operand to have. If the operand does not have the requested type, |
2163 | // a bail-out to the non-speculative path will be taken. |
2164 | |
2165 | class SpeculateInt32Operand { |
2166 | public: |
2167 | explicit SpeculateInt32Operand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) |
2168 | : m_jit(jit) |
2169 | , m_edge(edge) |
2170 | , m_gprOrInvalid(InvalidGPRReg) |
2171 | #ifndef NDEBUG |
2172 | , m_format(DataFormatNone) |
2173 | #endif |
2174 | { |
2175 | ASSERT(m_jit); |
2176 | ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use)); |
2177 | if (jit->isFilled(node())) |
2178 | gpr(); |
2179 | } |
2180 | |
2181 | ~SpeculateInt32Operand() |
2182 | { |
2183 | ASSERT(m_gprOrInvalid != InvalidGPRReg); |
2184 | m_jit->unlock(m_gprOrInvalid); |
2185 | } |
2186 | |
2187 | Edge edge() const |
2188 | { |
2189 | return m_edge; |
2190 | } |
2191 | |
2192 | Node* node() const |
2193 | { |
2194 | return edge().node(); |
2195 | } |
2196 | |
2197 | DataFormat format() |
2198 | { |
2199 | gpr(); // m_format is set when m_gpr is locked. |
2200 | ASSERT(m_format == DataFormatInt32 || m_format == DataFormatJSInt32); |
2201 | return m_format; |
2202 | } |
2203 | |
2204 | GPRReg gpr() |
2205 | { |
2206 | if (m_gprOrInvalid == InvalidGPRReg) |
2207 | m_gprOrInvalid = m_jit->fillSpeculateInt32(edge(), m_format); |
2208 | return m_gprOrInvalid; |
2209 | } |
2210 | |
2211 | void use() |
2212 | { |
2213 | m_jit->use(node()); |
2214 | } |
2215 | |
2216 | private: |
2217 | SpeculativeJIT* m_jit; |
2218 | Edge m_edge; |
2219 | GPRReg m_gprOrInvalid; |
2220 | DataFormat m_format; |
2221 | }; |
2222 | |
2223 | class SpeculateStrictInt32Operand { |
2224 | public: |
2225 | explicit SpeculateStrictInt32Operand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) |
2226 | : m_jit(jit) |
2227 | , m_edge(edge) |
2228 | , m_gprOrInvalid(InvalidGPRReg) |
2229 | { |
2230 | ASSERT(m_jit); |
2231 | ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use)); |
2232 | if (jit->isFilled(node())) |
2233 | gpr(); |
2234 | } |
2235 | |
2236 | ~SpeculateStrictInt32Operand() |
2237 | { |
2238 | ASSERT(m_gprOrInvalid != InvalidGPRReg); |
2239 | m_jit->unlock(m_gprOrInvalid); |
2240 | } |
2241 | |
2242 | Edge edge() const |
2243 | { |
2244 | return m_edge; |
2245 | } |
2246 | |
2247 | Node* node() const |
2248 | { |
2249 | return edge().node(); |
2250 | } |
2251 | |
2252 | GPRReg gpr() |
2253 | { |
2254 | if (m_gprOrInvalid == InvalidGPRReg) |
2255 | m_gprOrInvalid = m_jit->fillSpeculateInt32Strict(edge()); |
2256 | return m_gprOrInvalid; |
2257 | } |
2258 | |
2259 | void use() |
2260 | { |
2261 | m_jit->use(node()); |
2262 | } |
2263 | |
2264 | private: |
2265 | SpeculativeJIT* m_jit; |
2266 | Edge m_edge; |
2267 | GPRReg m_gprOrInvalid; |
2268 | }; |
2269 | |
// Gives you a canonical Int52 (i.e. it's left-shifted by 16, low bits zero).
class SpeculateInt52Operand {
public:
    explicit SpeculateInt52Operand(SpeculativeJIT* jit, Edge edge)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
    {
        RELEASE_ASSERT(edge.useKind() == Int52RepUse);
        if (jit->isFilled(node()))
            gpr();
    }

    ~SpeculateInt52Operand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

    // Fills lazily, requesting the shifted (DataFormatInt52) representation.
    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillSpeculateInt52(edge(), DataFormatInt52);
        return m_gprOrInvalid;
    }

    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
};
2316 | |
// Gives you a strict Int52 (i.e. the payload is in the low 48 bits, high 16 bits are sign-extended).
class SpeculateStrictInt52Operand {
public:
    explicit SpeculateStrictInt52Operand(SpeculativeJIT* jit, Edge edge)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
    {
        RELEASE_ASSERT(edge.useKind() == Int52RepUse);
        if (jit->isFilled(node()))
            gpr();
    }

    ~SpeculateStrictInt52Operand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

    // Fills lazily, requesting the unshifted (DataFormatStrictInt52) representation.
    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillSpeculateInt52(edge(), DataFormatStrictInt52);
        return m_gprOrInvalid;
    }

    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
};
2363 | |
// Tag type selecting the constructor that uses the opposite Int52 shift of another operand.
enum OppositeShiftTag { OppositeShift };
2365 | |
// An Int52 operand in whichever representation (shifted or strict) is cheapest
// to obtain; format() reports which one was chosen.
class SpeculateWhicheverInt52Operand {
public:
    explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
        , m_strict(jit->betterUseStrictInt52(edge)) // Let the JIT pick the cheaper representation.
    {
        RELEASE_ASSERT(edge.useKind() == Int52RepUse);
        if (jit->isFilled(node()))
            gpr();
    }

    // Uses the same representation as `other` (e.g. so two operands of one node agree).
    explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, const SpeculateWhicheverInt52Operand& other)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
        , m_strict(other.m_strict)
    {
        RELEASE_ASSERT(edge.useKind() == Int52RepUse);
        if (jit->isFilled(node()))
            gpr();
    }

    // Uses the opposite representation from `other`.
    explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, OppositeShiftTag, const SpeculateWhicheverInt52Operand& other)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
        , m_strict(!other.m_strict)
    {
        RELEASE_ASSERT(edge.useKind() == Int52RepUse);
        if (jit->isFilled(node()))
            gpr();
    }

    ~SpeculateWhicheverInt52Operand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg) {
            m_gprOrInvalid = m_jit->fillSpeculateInt52(
                edge(), m_strict ? DataFormatStrictInt52 : DataFormatInt52);
        }
        return m_gprOrInvalid;
    }

    void use()
    {
        m_jit->use(node());
    }

    // The representation the register holds (strict or shifted).
    DataFormat format() const
    {
        return m_strict ? DataFormatStrictInt52 : DataFormatInt52;
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
    bool m_strict;
};
2442 | |
// Locks a double-representation operand into an FPR; requires a double use kind.
class SpeculateDoubleOperand {
public:
    explicit SpeculateDoubleOperand(SpeculativeJIT* jit, Edge edge)
        : m_jit(jit)
        , m_edge(edge)
        , m_fprOrInvalid(InvalidFPRReg)
    {
        ASSERT(m_jit);
        RELEASE_ASSERT(isDouble(edge.useKind()));
        if (jit->isFilled(node()))
            fpr();
    }

    ~SpeculateDoubleOperand()
    {
        ASSERT(m_fprOrInvalid != InvalidFPRReg);
        m_jit->unlock(m_fprOrInvalid);
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

    // Fills lazily on first use and locks the FPR.
    FPRReg fpr()
    {
        if (m_fprOrInvalid == InvalidFPRReg)
            m_fprOrInvalid = m_jit->fillSpeculateDouble(edge());
        return m_fprOrInvalid;
    }

    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    FPRReg m_fprOrInvalid;
};
2489 | |
// Locks an operand into a GPR, speculating that it is a cell (heap pointer).
// Movable; an empty edge (default or moved-from) makes the operand inert.
class SpeculateCellOperand {
    WTF_MAKE_NONCOPYABLE(SpeculateCellOperand);

public:
    explicit SpeculateCellOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
    {
        ASSERT(m_jit);
        // An empty edge yields an inert operand that locks nothing.
        if (!edge)
            return;
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || isCell(edge.useKind()));
        if (jit->isFilled(node()))
            gpr();
    }

    // Move construction transfers register ownership; the source's edge is
    // cleared so its destructor unlocks nothing.
    explicit SpeculateCellOperand(SpeculateCellOperand&& other)
    {
        m_jit = other.m_jit;
        m_edge = other.m_edge;
        m_gprOrInvalid = other.m_gprOrInvalid;

        other.m_gprOrInvalid = InvalidGPRReg;
        other.m_edge = Edge();
    }

    ~SpeculateCellOperand()
    {
        if (!m_edge)
            return;
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

    GPRReg gpr()
    {
        ASSERT(m_edge);
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillSpeculateCell(edge());
        return m_gprOrInvalid;
    }

    void use()
    {
        ASSERT(m_edge);
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
};
2554 | |
2555 | class SpeculateBooleanOperand { |
2556 | public: |
2557 | explicit SpeculateBooleanOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) |
2558 | : m_jit(jit) |
2559 | , m_edge(edge) |
2560 | , m_gprOrInvalid(InvalidGPRReg) |
2561 | { |
2562 | ASSERT(m_jit); |
2563 | ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse || edge.useKind() == KnownBooleanUse); |
2564 | if (jit->isFilled(node())) |
2565 | gpr(); |
2566 | } |
2567 | |
2568 | ~SpeculateBooleanOperand() |
2569 | { |
2570 | ASSERT(m_gprOrInvalid != InvalidGPRReg); |
2571 | m_jit->unlock(m_gprOrInvalid); |
2572 | } |
2573 | |
2574 | Edge edge() const |
2575 | { |
2576 | return m_edge; |
2577 | } |
2578 | |
2579 | Node* node() const |
2580 | { |
2581 | return edge().node(); |
2582 | } |
2583 | |
2584 | GPRReg gpr() |
2585 | { |
2586 | if (m_gprOrInvalid == InvalidGPRReg) |
2587 | m_gprOrInvalid = m_jit->fillSpeculateBoolean(edge()); |
2588 | return m_gprOrInvalid; |
2589 | } |
2590 | |
2591 | void use() |
2592 | { |
2593 | m_jit->use(node()); |
2594 | } |
2595 | |
2596 | private: |
2597 | SpeculativeJIT* m_jit; |
2598 | Edge m_edge; |
2599 | GPRReg m_gprOrInvalid; |
2600 | }; |
2601 | |
// Emits a type check for `edge` only when the abstract interpreter cannot
// already prove the required type (needsTypeCheck() false compiles to nothing).
// Written as a macro so the early-out can `break` before evaluating jumpToFail.
#define DFG_TYPE_CHECK_WITH_EXIT_KIND(exitKind, source, edge, typesPassedThrough, jumpToFail) do { \
        JSValueSource _dtc_source = (source);                           \
        Edge _dtc_edge = (edge);                                        \
        SpeculatedType _dtc_typesPassedThrough = typesPassedThrough;    \
        if (!needsTypeCheck(_dtc_edge, _dtc_typesPassedThrough))        \
            break;                                                      \
        typeCheck(_dtc_source, _dtc_edge, _dtc_typesPassedThrough, (jumpToFail), exitKind); \
    } while (0)

// Common case: a type check that exits with the BadType exit kind.
#define DFG_TYPE_CHECK(source, edge, typesPassedThrough, jumpToFail) \
    DFG_TYPE_CHECK_WITH_EXIT_KIND(BadType, source, edge, typesPassedThrough, jumpToFail)
2613 | |
2614 | } } // namespace JSC::DFG |
2615 | |
2616 | #endif |
2617 | |