1 | /* |
2 | * Copyright (C) 2009-2019 Apple Inc. All rights reserved. |
3 | * Copyright (C) 2010 Patrick Gansterer <[email protected]> |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
8 | * 1. Redistributions of source code must retain the above copyright |
9 | * notice, this list of conditions and the following disclaimer. |
10 | * 2. Redistributions in binary form must reproduce the above copyright |
11 | * notice, this list of conditions and the following disclaimer in the |
12 | * documentation and/or other materials provided with the distribution. |
13 | * |
14 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
15 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
17 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
18 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
19 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
20 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
21 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
22 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
24 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | */ |
26 | |
27 | #include "config.h" |
28 | |
29 | #if ENABLE(JIT) |
30 | #if USE(JSVALUE32_64) |
31 | #include "JIT.h" |
32 | |
33 | #include "BytecodeStructs.h" |
34 | #include "CCallHelpers.h" |
35 | #include "Exception.h" |
36 | #include "JITInlines.h" |
37 | #include "JSArray.h" |
38 | #include "JSCast.h" |
39 | #include "JSFunction.h" |
40 | #include "JSPropertyNameEnumerator.h" |
41 | #include "LinkBuffer.h" |
42 | #include "MaxFrameExtentForSlowPathCall.h" |
43 | #include "OpcodeInlines.h" |
44 | #include "SlowPathCall.h" |
45 | #include "TypeProfilerLog.h" |
46 | #include "VirtualRegister.h" |
47 | |
48 | namespace JSC { |
49 | |
50 | void JIT::emit_op_mov(const Instruction* currentInstruction) |
51 | { |
52 | auto bytecode = currentInstruction->as<OpMov>(); |
53 | int dst = bytecode.m_dst.offset(); |
54 | int src = bytecode.m_src.offset(); |
55 | |
56 | if (m_codeBlock->isConstantRegisterIndex(src)) |
57 | emitStore(dst, getConstantOperand(src)); |
58 | else { |
59 | emitLoad(src, regT1, regT0); |
60 | emitStore(dst, regT1, regT0); |
61 | } |
62 | } |
63 | |
void JIT::emit_op_end(const Instruction* currentInstruction)
{
    // Terminates execution of this code block: load the return value,
    // restore callee-saved registers, tear down the frame, and return.
    ASSERT(returnValueGPR != callFrameRegister);
    auto bytecode = currentInstruction->as<OpEnd>();
    // Tag goes to regT1, payload to returnValueGPR (32-bit value pair).
    emitLoad(bytecode.m_value.offset(), regT1, returnValueGPR);
    emitRestoreCalleeSaves();
    emitFunctionEpilogue();
    ret();
}
73 | |
74 | void JIT::emit_op_jmp(const Instruction* currentInstruction) |
75 | { |
76 | auto bytecode = currentInstruction->as<OpJmp>(); |
77 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
78 | addJump(jump(), target); |
79 | } |
80 | |
void JIT::emit_op_new_object(const Instruction* currentInstruction)
{
    // Inline-allocates an empty JSFinalObject using the structure recorded in
    // the allocation profile. If no suitable allocator exists, the whole
    // operation is punted to the slow path.
    auto bytecode = currentInstruction->as<OpNewObject>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    Structure* structure = metadata.m_objectAllocationProfile.structure();
    size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
    Allocator allocator = allocatorForNonVirtualConcurrently<JSFinalObject>(*m_vm, allocationSize, AllocatorForMode::AllocatorIfExists);

    RegisterID resultReg = returnValueGPR;
    RegisterID allocatorReg = regT1;
    RegisterID scratchReg = regT3;

    if (!allocator)
        addSlowCase(jump());
    else {
        JumpList slowCases;
        // A fresh final object starts with no butterfly (out-of-line storage).
        auto butterfly = TrustedImmPtr(nullptr);
        emitAllocateJSObject(resultReg, JITAllocator::constant(allocator), allocatorReg, TrustedImmPtr(structure), butterfly, scratchReg, slowCases);
        emitInitializeInlineStorage(resultReg, structure->inlineCapacity());
        addSlowCase(slowCases);
        emitStoreCell(bytecode.m_dst.offset(), resultReg);
    }
}
104 | |
105 | void JIT::emitSlow_op_new_object(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
106 | { |
107 | linkAllSlowCases(iter); |
108 | |
109 | auto bytecode = currentInstruction->as<OpNewObject>(); |
110 | auto& metadata = bytecode.metadata(m_codeBlock); |
111 | int dst = bytecode.m_dst.offset(); |
112 | Structure* structure = metadata.m_objectAllocationProfile.structure(); |
113 | callOperation(operationNewObject, structure); |
114 | emitStoreCell(dst, returnValueGPR); |
115 | } |
116 | |
void JIT::emit_op_overrides_has_instance(const Instruction* currentInstruction)
{
    // Computes whether `constructor instanceof` must take the generic path:
    // the result is true unless hasInstanceValue is the default
    // Function.prototype[Symbol.hasInstance] AND the constructor implements
    // default hasInstance behavior.
    auto bytecode = currentInstruction->as<OpOverridesHasInstance>();
    int dst = bytecode.m_dst.offset();
    int constructor = bytecode.m_constructor.offset();
    int hasInstanceValue = bytecode.m_hasInstanceValue.offset();

    emitLoadPayload(hasInstanceValue, regT0);
    // We don't jump if we know what Symbol.hasInstance would do.
    Jump hasInstanceValueNotCell = emitJumpIfNotJSCell(hasInstanceValue);
    Jump customhasInstanceValue = branchPtr(NotEqual, regT0, TrustedImmPtr(m_codeBlock->globalObject()->functionProtoHasInstanceSymbolFunction()));

    // We know that constructor is an object from the way bytecode is emitted for instanceof expressions.
    emitLoadPayload(constructor, regT0);

    // Check that constructor 'ImplementsDefaultHasInstance' i.e. the object is not a C-API user nor a bound function.
    // Produces 1 when the flag is clear (i.e. the default behavior IS overridden).
    test8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance), regT0);
    Jump done = jump();

    hasInstanceValueNotCell.link(this);
    customhasInstanceValue.link(this);
    // Non-cell or custom hasInstance value: always report "overridden".
    move(TrustedImm32(1), regT0);

    done.link(this);
    emitStoreBool(dst, regT0);

}
144 | |
void JIT::emit_op_instanceof(const Instruction* currentInstruction)
{
    // Fast path for `value instanceof proto` via an inline-cache stub
    // (JITInstanceOfGenerator). Non-cell operands bail to the slow path.
    auto bytecode = currentInstruction->as<OpInstanceof>();
    int dst = bytecode.m_dst.offset();
    int value = bytecode.m_value.offset();
    int proto = bytecode.m_prototype.offset();

    // Load the operands into registers.
    // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
    emitLoadPayload(value, regT2);
    emitLoadPayload(proto, regT1);

    // Check that proto are cells. baseVal must be a cell - this is checked by the get_by_id for Symbol.hasInstance.
    emitJumpSlowCaseIfNotJSCell(value);
    emitJumpSlowCaseIfNotJSCell(proto);

    JITInstanceOfGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset),
        RegisterSet::stubUnavailableRegisters(),
        regT0, // result
        regT2, // value
        regT1, // proto
        regT3, regT4); // scratch
    gen.generateFastPath(*this);
    // Remember the generator so emitSlow_op_instanceof can link its slow path.
    m_instanceOfs.append(gen);

    emitStoreBool(dst, regT0);
}
173 | |
void JIT::emit_op_instanceof_custom(const Instruction*)
{
    // This always goes to slow path since we expect it to be rare.
    addSlowCase(jump());
}
179 | |
void JIT::emitSlow_op_instanceof(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Slow path: call the optimizing runtime operation through the stub info
    // recorded by the fast path.
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpInstanceof>();
    int dst = bytecode.m_dst.offset();
    int value = bytecode.m_value.offset();
    int proto = bytecode.m_prototype.offset();

    JITInstanceOfGenerator& gen = m_instanceOfs[m_instanceOfIndex++];

    Label coldPathBegin = label();
    // Only the tags are reloaded here; the payloads are presumably still live
    // in regT2 (value) and regT1 (proto) from the fast path above — note the
    // JSValueRegs pairings below.
    emitLoadTag(value, regT0);
    emitLoadTag(proto, regT3);
    Call call = callOperation(operationInstanceOfOptimize, dst, gen.stubInfo(), JSValueRegs(regT0, regT2), JSValueRegs(regT3, regT1));
    gen.reportSlowPathCall(coldPathBegin, call);
}
197 | |
void JIT::emitSlow_op_instanceof_custom(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Slow path (always taken — see emit_op_instanceof_custom): evaluate the
    // custom hasInstance behavior in the runtime and store the boolean result.
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpInstanceofCustom>();
    int dst = bytecode.m_dst.offset();
    int value = bytecode.m_value.offset();
    int constructor = bytecode.m_constructor.offset();
    int hasInstanceValue = bytecode.m_hasInstanceValue.offset();

    emitLoad(value, regT1, regT0);
    emitLoadPayload(constructor, regT2);
    emitLoad(hasInstanceValue, regT4, regT3);
    callOperation(operationInstanceOfCustom, JSValueRegs(regT1, regT0), regT2, JSValueRegs(regT4, regT3));
    emitStoreBool(dst, returnValueGPR);
}
214 | |
215 | void JIT::emit_op_is_empty(const Instruction* currentInstruction) |
216 | { |
217 | auto bytecode = currentInstruction->as<OpIsEmpty>(); |
218 | int dst = bytecode.m_dst.offset(); |
219 | int value = bytecode.m_operand.offset(); |
220 | |
221 | emitLoad(value, regT1, regT0); |
222 | compare32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag), regT0); |
223 | |
224 | emitStoreBool(dst, regT0); |
225 | } |
226 | |
void JIT::emit_op_is_undefined(const Instruction* currentInstruction)
{
    // Tests whether the operand is undefined, including the
    // MasqueradesAsUndefined case (document.all-style objects) which only
    // counts when the cell's global object matches this code block's.
    auto bytecode = currentInstruction->as<OpIsUndefined>();
    int dst = bytecode.m_dst.offset();
    int value = bytecode.m_operand.offset();

    emitLoad(value, regT1, regT0);
    Jump isCell = branchIfCell(regT1);

    // Non-cell: just compare the tag against UndefinedTag.
    compare32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag), regT0);
    Jump done = jump();

    isCell.link(this);
    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
    // Ordinary cell: never undefined.
    move(TrustedImm32(0), regT0);
    Jump notMasqueradesAsUndefined = jump();

    isMasqueradesAsUndefined.link(this);
    // Masquerading cell: result is whether its structure's global object is
    // this code block's global object.
    loadPtr(Address(regT0, JSCell::structureIDOffset()), regT1);
    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
    loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
    compare32(Equal, regT0, regT1, regT0);

    notMasqueradesAsUndefined.link(this);
    done.link(this);
    emitStoreBool(dst, regT0);
}
254 | |
void JIT::emit_op_is_undefined_or_null(const Instruction* currentInstruction)
{
    // Tests for undefined OR null in one compare: since UndefinedTag + 1 ==
    // NullTag and NullTag has bit 0 set (enforced below), OR-ing bit 0 into
    // the tag maps UndefinedTag onto NullTag without disturbing other tags.
    auto bytecode = currentInstruction->as<OpIsUndefinedOrNull>();
    int dst = bytecode.m_dst.offset();
    int value = bytecode.m_operand.offset();

    emitLoadTag(value, regT0);
    static_assert((JSValue::UndefinedTag + 1 == JSValue::NullTag) && (JSValue::NullTag & 0x1), "" );
    or32(TrustedImm32(1), regT0);
    compare32(Equal, regT0, TrustedImm32(JSValue::NullTag), regT0);
    emitStoreBool(dst, regT0);
}
267 | |
268 | void JIT::emit_op_is_boolean(const Instruction* currentInstruction) |
269 | { |
270 | auto bytecode = currentInstruction->as<OpIsBoolean>(); |
271 | int dst = bytecode.m_dst.offset(); |
272 | int value = bytecode.m_operand.offset(); |
273 | |
274 | emitLoadTag(value, regT0); |
275 | compare32(Equal, regT0, TrustedImm32(JSValue::BooleanTag), regT0); |
276 | emitStoreBool(dst, regT0); |
277 | } |
278 | |
void JIT::emit_op_is_number(const Instruction* currentInstruction)
{
    // Tests whether the operand is a number (int32 or double) with a single
    // unsigned compare.
    auto bytecode = currentInstruction->as<OpIsNumber>();
    int dst = bytecode.m_dst.offset();
    int value = bytecode.m_operand.offset();

    emitLoadTag(value, regT0);
    // NOTE(review): relies on the JSVALUE32_64 tag encoding — after adding 1,
    // the int32 tag wraps to 0 and double tags stay below LowestTag + 1, so
    // one Below compare accepts exactly the number tags. Confirm against the
    // tag layout in JSCJSValue.h.
    add32(TrustedImm32(1), regT0);
    compare32(Below, regT0, TrustedImm32(JSValue::LowestTag + 1), regT0);
    emitStoreBool(dst, regT0);
}
290 | |
void JIT::emit_op_is_cell_with_type(const Instruction* currentInstruction)
{
    // Tests whether the operand is a cell whose JSType equals the type baked
    // into the bytecode.
    auto bytecode = currentInstruction->as<OpIsCellWithType>();
    int dst = bytecode.m_dst.offset();
    int value = bytecode.m_operand.offset();
    int type = bytecode.m_type;

    emitLoad(value, regT1, regT0);
    Jump isNotCell = branchIfNotCell(regT1);

    // Cell: compare the type byte in the cell header against the wanted type.
    compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(type), regT0);
    Jump done = jump();

    // Non-cell: result is false.
    isNotCell.link(this);
    move(TrustedImm32(0), regT0);

    done.link(this);
    emitStoreBool(dst, regT0);
}
310 | |
311 | void JIT::emit_op_is_object(const Instruction* currentInstruction) |
312 | { |
313 | auto bytecode = currentInstruction->as<OpIsObject>(); |
314 | int dst = bytecode.m_dst.offset(); |
315 | int value = bytecode.m_operand.offset(); |
316 | |
317 | emitLoad(value, regT1, regT0); |
318 | Jump isNotCell = branchIfNotCell(regT1); |
319 | |
320 | compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0); |
321 | Jump done = jump(); |
322 | |
323 | isNotCell.link(this); |
324 | move(TrustedImm32(0), regT0); |
325 | |
326 | done.link(this); |
327 | emitStoreBool(dst, regT0); |
328 | } |
329 | |
void JIT::emit_op_to_primitive(const Instruction* currentInstruction)
{
    // ToPrimitive fast path: non-cells and non-object cells (strings,
    // symbols, ...) are already primitive and pass through unchanged;
    // objects take the slow case.
    auto bytecode = currentInstruction->as<OpToPrimitive>();
    int dst = bytecode.m_dst.offset();
    int src = bytecode.m_src.offset();

    emitLoad(src, regT1, regT0);

    Jump isImm = branchIfNotCell(regT1);
    addSlowCase(branchIfObject(regT0));
    isImm.link(this);

    // Skip the redundant store when the operand is already in place.
    if (dst != src)
        emitStore(dst, regT1, regT0);
}
345 | |
346 | void JIT::emit_op_set_function_name(const Instruction* currentInstruction) |
347 | { |
348 | auto bytecode = currentInstruction->as<OpSetFunctionName>(); |
349 | int func = bytecode.m_function.offset(); |
350 | int name = bytecode.m_name.offset(); |
351 | emitLoadPayload(func, regT1); |
352 | emitLoad(name, regT3, regT2); |
353 | callOperation(operationSetFunctionName, regT1, JSValueRegs(regT3, regT2)); |
354 | } |
355 | |
356 | void JIT::emit_op_not(const Instruction* currentInstruction) |
357 | { |
358 | auto bytecode = currentInstruction->as<OpNot>(); |
359 | int dst = bytecode.m_dst.offset(); |
360 | int src = bytecode.m_operand.offset(); |
361 | |
362 | emitLoadTag(src, regT0); |
363 | |
364 | emitLoad(src, regT1, regT0); |
365 | addSlowCase(branchIfNotBoolean(regT1, InvalidGPRReg)); |
366 | xor32(TrustedImm32(1), regT0); |
367 | |
368 | emitStoreBool(dst, regT0, (dst == src)); |
369 | } |
370 | |
371 | void JIT::emit_op_jfalse(const Instruction* currentInstruction) |
372 | { |
373 | auto bytecode = currentInstruction->as<OpJfalse>(); |
374 | int cond = bytecode.m_condition.offset(); |
375 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
376 | |
377 | emitLoad(cond, regT1, regT0); |
378 | |
379 | JSValueRegs value(regT1, regT0); |
380 | GPRReg scratch1 = regT2; |
381 | GPRReg scratch2 = regT3; |
382 | bool shouldCheckMasqueradesAsUndefined = true; |
383 | addJump(branchIfFalsey(*vm(), value, scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target); |
384 | } |
385 | |
void JIT::emit_op_jtrue(const Instruction* currentInstruction)
{
    // Branches to the target when the condition operand is truthy; mirror of
    // emit_op_jfalse.
    auto bytecode = currentInstruction->as<OpJtrue>();
    int cond = bytecode.m_condition.offset();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);

    emitLoad(cond, regT1, regT0);
    bool shouldCheckMasqueradesAsUndefined = true;
    JSValueRegs value(regT1, regT0);
    GPRReg scratch1 = regT2;
    GPRReg scratch2 = regT3;
    // branchIfTruthy performs the full ToBoolean test, including the
    // MasqueradesAsUndefined check against this code block's global object.
    addJump(branchIfTruthy(*vm(), value, scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target);
}
399 | |
void JIT::emit_op_jeq_null(const Instruction* currentInstruction)
{
    // Branches when the operand loosely equals null: undefined, null, or a
    // MasqueradesAsUndefined cell from this code block's global object.
    auto bytecode = currentInstruction->as<OpJeqNull>();
    int src = bytecode.m_value.offset();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);

    emitLoad(src, regT1, regT0);

    Jump isImmediate = branchIfNotCell(regT1);

    // Cell path: only a masquerading cell from our global object counts.
    Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
    loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
    addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
    Jump masqueradesGlobalObjectIsForeign = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);
    // OR-ing bit 0 folds UndefinedTag onto NullTag (see static_assert).
    static_assert((JSValue::UndefinedTag + 1 == JSValue::NullTag) && (JSValue::NullTag & 0x1), "" );
    or32(TrustedImm32(1), regT1);
    addJump(branchIfNull(regT1), target);

    isNotMasqueradesAsUndefined.link(this);
    masqueradesGlobalObjectIsForeign.link(this);
}
425 | |
void JIT::emit_op_jneq_null(const Instruction* currentInstruction)
{
    // Branches when the operand does NOT loosely equal null; inverse of
    // emit_op_jeq_null.
    auto bytecode = currentInstruction->as<OpJneqNull>();
    int src = bytecode.m_value.offset();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);

    emitLoad(src, regT1, regT0);

    Jump isImmediate = branchIfNotCell(regT1);

    // Cell path: a non-masquerading cell is never null-equivalent, so jump;
    // a masquerading cell only counts if it belongs to our global object.
    addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
    loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
    addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);

    // OR-ing bit 0 folds UndefinedTag onto NullTag (see static_assert).
    static_assert((JSValue::UndefinedTag + 1 == JSValue::NullTag) && (JSValue::NullTag & 0x1), "" );
    or32(TrustedImm32(1), regT1);
    addJump(branchIfNotNull(regT1), target);

    wasNotImmediate.link(this);
}
451 | |
void JIT::emit_op_jneq_ptr(const Instruction* currentInstruction)
{
    // Branches unless the operand is exactly the given special pointer
    // (compared by cell payload). Taking the branch also records the fact in
    // metadata so later tiers know this fast-path assumption failed.
    auto bytecode = currentInstruction->as<OpJneqPtr>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int src = bytecode.m_value.offset();
    Special::Pointer ptr = bytecode.m_specialPointer;
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);

    emitLoad(src, regT1, regT0);
    Jump notCell = branchIfNotCell(regT1);
    Jump equal = branchPtr(Equal, regT0, TrustedImmPtr(actualPointerFor(m_codeBlock, ptr)));
    notCell.link(this);
    // Not the special pointer: note it and take the jump.
    store8(TrustedImm32(1), &metadata.m_hasJumped);
    addJump(jump(), target);
    equal.link(this);
}
468 | |
void JIT::emit_op_eq(const Instruction* currentInstruction)
{
    // Loose equality fast path: both operands must have the same non-cell,
    // non-double tag, in which case equality is just payload equality.
    auto bytecode = currentInstruction->as<OpEq>();

    int dst = bytecode.m_dst.offset();
    int src1 = bytecode.m_lhs.offset();
    int src2 = bytecode.m_rhs.offset();

    emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
    // Slow cases: differing tags, cells (string/object comparison), or
    // doubles (tag below LowestTag).
    addSlowCase(branch32(NotEqual, regT1, regT3));
    addSlowCase(branchIfCell(regT1));
    addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag)));

    compare32(Equal, regT0, regT2, regT0);

    emitStoreBool(dst, regT0);
}
486 | |
void JIT::emitSlow_op_eq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Slow path for op_eq: string==string gets a dedicated comparison; every
    // other combination falls through to the generic runtime compare.
    auto bytecode = currentInstruction->as<OpEq>();
    int dst = bytecode.m_dst.offset();

    JumpList storeResult;
    JumpList genericCase;

    genericCase.append(getSlowCase(iter)); // tags not equal

    linkSlowCase(iter); // tags equal and JSCell
    genericCase.append(branchIfNotString(regT0));
    genericCase.append(branchIfNotString(regT2));

    // String case.
    callOperation(operationCompareStringEq, regT0, regT2);
    storeResult.append(jump());

    // Generic case.
    genericCase.append(getSlowCase(iter)); // doubles
    genericCase.link(this);
    callOperation(operationCompareEq, JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));

    storeResult.link(this);
    emitStoreBool(dst, returnValueGPR);
}
513 | |
void JIT::emit_op_jeq(const Instruction* currentInstruction)
{
    // Fused compare-and-branch form of op_eq: same fast-path conditions, but
    // branches instead of storing a boolean.
    auto bytecode = currentInstruction->as<OpJeq>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
    int src1 = bytecode.m_lhs.offset();
    int src2 = bytecode.m_rhs.offset();

    emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
    // Slow cases: differing tags, cells, or doubles.
    addSlowCase(branch32(NotEqual, regT1, regT3));
    addSlowCase(branchIfCell(regT1));
    addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag)));

    addJump(branch32(Equal, regT0, regT2), target);
}
528 | |
// Shared slow path for op_jeq / op_jneq: strings get the dedicated string
// compare, everything else the generic runtime compare; `type` selects
// whether the branch is taken on equality (Eq) or inequality (NEq).
void JIT::compileOpEqJumpSlow(Vector<SlowCaseEntry>::iterator& iter, CompileOpEqType type, int jumpTarget)
{
    JumpList done;
    JumpList genericCase;

    genericCase.append(getSlowCase(iter)); // tags not equal

    linkSlowCase(iter); // tags equal and JSCell
    genericCase.append(branchIfNotString(regT0));
    genericCase.append(branchIfNotString(regT2));

    // String case.
    callOperation(operationCompareStringEq, regT0, regT2);
    emitJumpSlowToHot(branchTest32(type == CompileOpEqType::Eq ? NonZero : Zero, returnValueGPR), jumpTarget);
    done.append(jump());

    // Generic case.
    genericCase.append(getSlowCase(iter)); // doubles
    genericCase.link(this);
    callOperation(operationCompareEq, JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
    emitJumpSlowToHot(branchTest32(type == CompileOpEqType::Eq ? NonZero : Zero, returnValueGPR), jumpTarget);

    done.link(this);
}
553 | |
554 | void JIT::emitSlow_op_jeq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
555 | { |
556 | auto bytecode = currentInstruction->as<OpJeq>(); |
557 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
558 | compileOpEqJumpSlow(iter, CompileOpEqType::Eq, target); |
559 | } |
560 | |
void JIT::emit_op_neq(const Instruction* currentInstruction)
{
    // Loose inequality fast path; mirror of emit_op_eq with the payload
    // comparison inverted.
    auto bytecode = currentInstruction->as<OpNeq>();
    int dst = bytecode.m_dst.offset();
    int src1 = bytecode.m_lhs.offset();
    int src2 = bytecode.m_rhs.offset();

    emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
    // Slow cases: differing tags, cells, or doubles.
    addSlowCase(branch32(NotEqual, regT1, regT3));
    addSlowCase(branchIfCell(regT1));
    addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag)));

    compare32(NotEqual, regT0, regT2, regT0);

    emitStoreBool(dst, regT0);
}
577 | |
void JIT::emitSlow_op_neq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Slow path for op_neq: same structure as emitSlow_op_eq, but the
    // equality result is inverted before being stored.
    auto bytecode = currentInstruction->as<OpNeq>();
    int dst = bytecode.m_dst.offset();

    JumpList storeResult;
    JumpList genericCase;

    genericCase.append(getSlowCase(iter)); // tags not equal

    linkSlowCase(iter); // tags equal and JSCell
    genericCase.append(branchIfNotString(regT0));
    genericCase.append(branchIfNotString(regT2));

    // String case.
    callOperation(operationCompareStringEq, regT0, regT2);
    storeResult.append(jump());

    // Generic case.
    genericCase.append(getSlowCase(iter)); // doubles
    genericCase.link(this);
    callOperation(operationCompareEq, JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));

    storeResult.link(this);
    // Invert the 0/1 equality result to get inequality.
    xor32(TrustedImm32(0x1), returnValueGPR);
    emitStoreBool(dst, returnValueGPR);
}
605 | |
void JIT::emit_op_jneq(const Instruction* currentInstruction)
{
    // Fused compare-and-branch form of op_neq.
    auto bytecode = currentInstruction->as<OpJneq>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
    int src1 = bytecode.m_lhs.offset();
    int src2 = bytecode.m_rhs.offset();

    emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
    // Slow cases: differing tags, cells, or doubles.
    addSlowCase(branch32(NotEqual, regT1, regT3));
    addSlowCase(branchIfCell(regT1));
    addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag)));

    addJump(branch32(NotEqual, regT0, regT2), target);
}
620 | |
621 | void JIT::emitSlow_op_jneq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
622 | { |
623 | auto bytecode = currentInstruction->as<OpJneq>(); |
624 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
625 | compileOpEqJumpSlow(iter, CompileOpEqType::NEq, target); |
626 | } |
627 | |
// Shared fast path for op_stricteq / op_nstricteq. Strict equality of two
// same-tag, non-double values reduces to payload equality, except when both
// operands are strings or symbols (which need content comparison and go to
// the slow case).
template <typename Op>
void JIT::compileOpStrictEq(const Instruction* currentInstruction, CompileOpStrictEqType type)
{
    auto bytecode = currentInstruction->as<Op>();
    int dst = bytecode.m_dst.offset();
    int src1 = bytecode.m_lhs.offset();
    int src2 = bytecode.m_rhs.offset();

    emitLoad2(src1, regT1, regT0, src2, regT3, regT2);

    // Bail if the tags differ, or are double.
    addSlowCase(branch32(NotEqual, regT1, regT3));
    addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag)));

    // Jump to a slow case if both are strings or symbols (non object).
    Jump notCell = branchIfNotCell(regT1);
    Jump firstIsObject = branchIfObject(regT0);
    addSlowCase(branchIfNotObject(regT2));
    notCell.link(this);
    firstIsObject.link(this);

    // Simply compare the payloads.
    if (type == CompileOpStrictEqType::StrictEq)
        compare32(Equal, regT0, regT2, regT0);
    else
        compare32(NotEqual, regT0, regT2, regT0);

    emitStoreBool(dst, regT0);
}
657 | |
void JIT::emit_op_stricteq(const Instruction* currentInstruction)
{
    // Thin wrapper: strict equality via the shared template.
    compileOpStrictEq<OpStricteq>(currentInstruction, CompileOpStrictEqType::StrictEq);
}
662 | |
void JIT::emit_op_nstricteq(const Instruction* currentInstruction)
{
    // Thin wrapper: strict inequality via the shared template.
    compileOpStrictEq<OpNstricteq>(currentInstruction, CompileOpStrictEqType::NStrictEq);
}
667 | |
// Shared fast path for op_jstricteq / op_jnstricteq: same conditions as
// compileOpStrictEq, but branches on the result instead of storing it.
template<typename Op>
void JIT::compileOpStrictEqJump(const Instruction* currentInstruction, CompileOpStrictEqType type)
{
    auto bytecode = currentInstruction->as<Op>();
    int target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
    int src1 = bytecode.m_lhs.offset();
    int src2 = bytecode.m_rhs.offset();

    emitLoad2(src1, regT1, regT0, src2, regT3, regT2);

    // Bail if the tags differ, or are double.
    addSlowCase(branch32(NotEqual, regT1, regT3));
    addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag)));

    // Jump to a slow case if both are strings or symbols (non object).
    Jump notCell = branchIfNotCell(regT1);
    Jump firstIsObject = branchIfObject(regT0);
    addSlowCase(branchIfNotObject(regT2));
    notCell.link(this);
    firstIsObject.link(this);

    // Simply compare the payloads.
    if (type == CompileOpStrictEqType::StrictEq)
        addJump(branch32(Equal, regT0, regT2), target);
    else
        addJump(branch32(NotEqual, regT0, regT2), target);
}
695 | |
void JIT::emit_op_jstricteq(const Instruction* currentInstruction)
{
    // Thin wrapper: branch-on-strict-equality via the shared template.
    compileOpStrictEqJump<OpJstricteq>(currentInstruction, CompileOpStrictEqType::StrictEq);
}
700 | |
void JIT::emit_op_jnstricteq(const Instruction* currentInstruction)
{
    // Thin wrapper: branch-on-strict-inequality via the shared template.
    compileOpStrictEqJump<OpJnstricteq>(currentInstruction, CompileOpStrictEqType::NStrictEq);
}
705 | |
706 | void JIT::emitSlow_op_jstricteq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
707 | { |
708 | linkAllSlowCases(iter); |
709 | |
710 | auto bytecode = currentInstruction->as<OpJstricteq>(); |
711 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
712 | callOperation(operationCompareStrictEq, JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2)); |
713 | emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target); |
714 | } |
715 | |
void JIT::emitSlow_op_jnstricteq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Slow path for op_jnstricteq: ask the runtime, then branch when it
    // reports the operands NOT strictly equal (Zero test).
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpJnstricteq>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
    callOperation(operationCompareStrictEq, JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
    emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target);
}
725 | |
void JIT::emit_op_eq_null(const Instruction* currentInstruction)
{
    // Computes `src == null` (loose): true for null, undefined, and
    // MasqueradesAsUndefined cells belonging to this code block's global
    // object. Result accumulates in regT1.
    auto bytecode = currentInstruction->as<OpEqNull>();
    int dst = bytecode.m_dst.offset();
    int src = bytecode.m_operand.offset();

    emitLoad(src, regT1, regT0);
    Jump isImmediate = branchIfNotCell(regT1);

    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
    // Ordinary cell: never equals null.
    move(TrustedImm32(0), regT1);
    Jump wasNotMasqueradesAsUndefined = jump();

    isMasqueradesAsUndefined.link(this);
    // Masquerading cell: equal to null iff its structure's global object is ours.
    loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
    loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
    compare32(Equal, regT0, regT2, regT1);
    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    // Immediate: true iff the tag is NullTag or UndefinedTag.
    compare32(Equal, regT1, TrustedImm32(JSValue::NullTag), regT2);
    compare32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag), regT1);
    or32(regT2, regT1);

    wasNotImmediate.link(this);
    wasNotMasqueradesAsUndefined.link(this);

    emitStoreBool(dst, regT1);
}
757 | |
void JIT::emit_op_neq_null(const Instruction* currentInstruction)
{
    // Computes `src != null` (loose); exact inverse of emit_op_eq_null, with
    // every comparison condition flipped. Result accumulates in regT1.
    auto bytecode = currentInstruction->as<OpNeqNull>();
    int dst = bytecode.m_dst.offset();
    int src = bytecode.m_operand.offset();

    emitLoad(src, regT1, regT0);
    Jump isImmediate = branchIfNotCell(regT1);

    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
    // Ordinary cell: always != null.
    move(TrustedImm32(1), regT1);
    Jump wasNotMasqueradesAsUndefined = jump();

    isMasqueradesAsUndefined.link(this);
    // Masquerading cell: != null iff its structure's global object differs from ours.
    loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
    loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
    compare32(NotEqual, regT0, regT2, regT1);
    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    // Immediate: true iff the tag is neither NullTag nor UndefinedTag.
    compare32(NotEqual, regT1, TrustedImm32(JSValue::NullTag), regT2);
    compare32(NotEqual, regT1, TrustedImm32(JSValue::UndefinedTag), regT1);
    and32(regT2, regT1);

    wasNotImmediate.link(this);
    wasNotMasqueradesAsUndefined.link(this);

    emitStoreBool(dst, regT1);
}
789 | |
// Compiles op_throw: hands the thrown value to the runtime and jumps to the
// exception handler. Control never falls through past this opcode.
void JIT::emit_op_throw(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpThrow>();
    ASSERT(regT0 == returnValueGPR);
    // Unwinding may resume in a frame that restores callee saves from the
    // entry-frame buffer, so publish our callee saves there first.
    copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);
    emitLoad(bytecode.m_value.offset(), regT1, regT0);
    // No exception check needed: we are the ones raising the exception.
    callOperationNoExceptionCheck(operationThrow, JSValueRegs(regT1, regT0));
    jumpToExceptionHandler(*vm());
}
799 | |
// Compiles op_to_number's fast path: values that are already numbers (int32 or
// double) pass through unchanged; everything else takes the slow case.
void JIT::emit_op_to_number(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpToNumber>();
    int dst = bytecode.m_dst.offset();
    int src = bytecode.m_operand.offset();

    emitLoad(src, regT1, regT0);

    Jump isInt32 = branchIfInt32(regT1);
    // In the 32-bit value encoding, double values have tags (unsigned) below
    // LowestTag; with int32 already handled above, a tag >= LowestTag means
    // "not a number" and must go to the slow path.
    addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
    isInt32.link(this);

    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    // The value is unchanged, so only store when the operands differ.
    if (src != dst)
        emitStore(dst, regT1, regT0);
}
816 | |
// Compiles op_to_string's fast path: a value that is already a JSString needs
// no conversion; any non-cell or non-string cell takes the slow case.
void JIT::emit_op_to_string(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpToString>();
    int dst = bytecode.m_dst.offset();
    int src = bytecode.m_operand.offset();

    emitLoad(src, regT1, regT0);

    addSlowCase(branchIfNotCell(regT1));
    addSlowCase(branchIfNotString(regT0));

    // Already a string: at most a register-to-register copy is needed.
    if (src != dst)
        emitStore(dst, regT1, regT0);
}
831 | |
// Compiles op_to_object's fast path: a value that is already an object passes
// through (with value profiling); anything else takes the slow case.
void JIT::emit_op_to_object(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpToObject>();
    int dst = bytecode.m_dst.offset();
    int src = bytecode.m_operand.offset();

    emitLoad(src, regT1, regT0);

    addSlowCase(branchIfNotCell(regT1));
    addSlowCase(branchIfNotObject(regT0));

    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    // Already an object: at most a register-to-register copy is needed.
    if (src != dst)
        emitStore(dst, regT1, regT0);
}
847 | |
// Compiles op_catch, the landing point for an exception handler. Restores the
// machine state (callee saves, call frame, stack pointer), re-raises
// exceptions the handler is not allowed to catch, stores the Exception cell
// and the thrown JSValue into their operands, and (with the DFG enabled)
// attempts OSR entry into optimized code at the catch site.
void JIT::emit_op_catch(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpCatch>();

    restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);

    move(TrustedImmPtr(m_vm), regT3);
    // operationThrow returns the callFrame for the handler.
    load32(Address(regT3, VM::callFrameForCatchOffset()), callFrameRegister);
    // Clear the slot so stale frames can't be observed on a later catch.
    storePtr(TrustedImmPtr(nullptr), Address(regT3, VM::callFrameForCatchOffset()));

    // Re-derive the stack pointer from the restored call frame.
    addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister);

    // A nonzero return means the exception must not be caught here (e.g. a
    // termination request); keep unwinding in that case.
    callOperationNoExceptionCheck(operationCheckIfExceptionIsUncatchableAndNotifyProfiler);
    Jump isCatchableException = branchTest32(Zero, returnValueGPR);
    jumpToExceptionHandler(*vm());
    isCatchableException.link(this);

    move(TrustedImmPtr(m_vm), regT3);

    // Now store the exception returned by operationThrow.
    load32(Address(regT3, VM::exceptionOffset()), regT2);
    move(TrustedImm32(JSValue::CellTag), regT1);

    // Clear VM::m_exception now that we have taken ownership of it.
    store32(TrustedImm32(0), Address(regT3, VM::exceptionOffset()));

    unsigned exception = bytecode.m_exception.offset();
    emitStore(exception, regT1, regT2);

    // Extract the underlying thrown JSValue from the Exception cell (regT2).
    load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
    load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);

    unsigned thrownValue = bytecode.m_thrownValue.offset();
    emitStore(thrownValue, regT1, regT0);

#if ENABLE(DFG_JIT)
    // FIXME: consider inline caching the process of doing OSR entry, including
    // argument type proofs, storing locals to the buffer, etc
    // https://bugs.webkit.org/show_bug.cgi?id=175598

    // Value profiling at the catch site happens in one of two ways: inline via
    // the metadata buffer (loop below), or inside the
    // ...AndValueProfile operation when no buffer exists yet.
    auto& metadata = bytecode.metadata(m_codeBlock);
    ValueProfileAndOperandBuffer* buffer = metadata.m_buffer;
    if (buffer || !shouldEmitProfiling())
        callOperation(operationTryOSREnterAtCatch, m_bytecodeOffset);
    else
        callOperation(operationTryOSREnterAtCatchAndValueProfile, m_bytecodeOffset);
    // A nonzero return is the OSR entry point; jump into optimized code.
    auto skipOSREntry = branchTestPtr(Zero, returnValueGPR);
    emitRestoreCalleeSaves();
    jump(returnValueGPR, NoPtrTag);
    skipOSREntry.link(this);
    if (buffer && shouldEmitProfiling()) {
        buffer->forEach([&] (ValueProfileAndOperand& profile) {
            JSValueRegs regs(regT1, regT0);
            emitGetVirtualRegister(profile.m_operand, regs);
            emitValueProfilingSite(static_cast<ValueProfile&>(profile));
        });
    }
#endif // ENABLE(DFG_JIT)
}
907 | |
// Intentionally a no-op at the Baseline tier: the operand already holds the
// value, so no code is emitted. (NOTE(review): the profiling implied by the
// opcode name is presumably consumed by the optimizing tiers — confirm against
// the bytecode definition.)
void JIT::emit_op_identity_with_profile(const Instruction*)
{
    // We don't need to do anything here...
}
912 | |
913 | void JIT::emit_op_get_parent_scope(const Instruction* currentInstruction) |
914 | { |
915 | auto bytecode = currentInstruction->as<OpGetParentScope>(); |
916 | int currentScope = bytecode.m_scope.offset(); |
917 | emitLoadPayload(currentScope, regT0); |
918 | loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0); |
919 | emitStoreCell(bytecode.m_dst.offset(), regT0); |
920 | } |
921 | |
922 | void JIT::emit_op_switch_imm(const Instruction* currentInstruction) |
923 | { |
924 | auto bytecode = currentInstruction->as<OpSwitchImm>(); |
925 | size_t tableIndex = bytecode.m_tableIndex; |
926 | unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset); |
927 | unsigned scrutinee = bytecode.m_scrutinee.offset(); |
928 | |
929 | // create jump table for switch destinations, track this switch statement. |
930 | SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex); |
931 | m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate)); |
932 | jumpTable->ensureCTITable(); |
933 | |
934 | emitLoad(scrutinee, regT1, regT0); |
935 | callOperation(operationSwitchImmWithUnknownKeyType, JSValueRegs(regT1, regT0), tableIndex); |
936 | jump(returnValueGPR, NoPtrTag); |
937 | } |
938 | |
939 | void JIT::emit_op_switch_char(const Instruction* currentInstruction) |
940 | { |
941 | auto bytecode = currentInstruction->as<OpSwitchChar>(); |
942 | size_t tableIndex = bytecode.m_tableIndex; |
943 | unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset); |
944 | unsigned scrutinee = bytecode.m_scrutinee.offset(); |
945 | |
946 | // create jump table for switch destinations, track this switch statement. |
947 | SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex); |
948 | m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character)); |
949 | jumpTable->ensureCTITable(); |
950 | |
951 | emitLoad(scrutinee, regT1, regT0); |
952 | callOperation(operationSwitchCharWithUnknownKeyType, JSValueRegs(regT1, regT0), tableIndex); |
953 | jump(returnValueGPR, NoPtrTag); |
954 | } |
955 | |
956 | void JIT::emit_op_switch_string(const Instruction* currentInstruction) |
957 | { |
958 | auto bytecode = currentInstruction->as<OpSwitchString>(); |
959 | size_t tableIndex = bytecode.m_tableIndex; |
960 | unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset); |
961 | unsigned scrutinee = bytecode.m_scrutinee.offset(); |
962 | |
963 | // create jump table for switch destinations, track this switch statement. |
964 | StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex); |
965 | m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset)); |
966 | |
967 | emitLoad(scrutinee, regT1, regT0); |
968 | callOperation(operationSwitchStringWithUnknownKeyType, JSValueRegs(regT1, regT0), tableIndex); |
969 | jump(returnValueGPR, NoPtrTag); |
970 | } |
971 | |
972 | void JIT::emit_op_debug(const Instruction* currentInstruction) |
973 | { |
974 | auto bytecode = currentInstruction->as<OpDebug>(); |
975 | load32(codeBlock()->debuggerRequestsAddress(), regT0); |
976 | Jump noDebuggerRequests = branchTest32(Zero, regT0); |
977 | callOperation(operationDebug, static_cast<int>(bytecode.m_debugHookType)); |
978 | noDebuggerRequests.link(this); |
979 | } |
980 | |
981 | |
// Compiles op_enter, the first opcode of a function: checks whether we should
// tier up, initializes all local variables, then runs the shared
// slow_path_enter logic.
void JIT::emit_op_enter(const Instruction* currentInstruction)
{
    emitEnterOptimizationCheck();

    // Even though JIT code doesn't use them, we initialize our constant
    // registers to zap stale pointers, to avoid unnecessarily prolonging
    // object lifetime and increasing GC pressure.
    // (Skips the virtual registers reserved for LLInt/Baseline callee saves.)
    for (int i = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); i < m_codeBlock->numVars(); ++i)
        emitStore(virtualRegisterForLocal(i).offset(), jsUndefined());

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_enter);
    slowPathCall.call();
}
995 | |
996 | void JIT::emit_op_get_scope(const Instruction* currentInstruction) |
997 | { |
998 | auto bytecode = currentInstruction->as<OpGetScope>(); |
999 | int dst = bytecode.m_dst.offset(); |
1000 | emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, regT0); |
1001 | loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0); |
1002 | emitStoreCell(dst, regT0); |
1003 | } |
1004 | |
// Compiles op_create_this's fast path: inline-allocates the |this| object for
// a [[Construct]] call using the callee's cached allocation profile. Falls
// back to the slow case when the callee isn't a JSFunction, has no rare data,
// or doesn't match the cached callee.
void JIT::emit_op_create_this(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpCreateThis>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int callee = bytecode.m_callee.offset();
    WriteBarrierBase<JSCell>* cachedFunction = &metadata.m_cachedCallee;
    RegisterID calleeReg = regT0;
    // Note: rareDataReg and cachedFunctionReg both alias regT4; this is safe
    // because the rare data is dead by the time the cached function is loaded.
    RegisterID rareDataReg = regT4;
    RegisterID resultReg = regT0;
    RegisterID allocatorReg = regT1;
    RegisterID structureReg = regT2;
    RegisterID cachedFunctionReg = regT4;
    RegisterID scratchReg = regT3;

    emitLoadPayload(callee, calleeReg);
    addSlowCase(branchIfNotFunction(calleeReg));
    loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg);
    addSlowCase(branchTestPtr(Zero, rareDataReg));
    // Pull the allocator and structure out of the callee's object allocation
    // profile (populated by earlier constructions).
    load32(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfileWithPrototype::offsetOfAllocator()), allocatorReg);
    loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfileWithPrototype::offsetOfStructure()), structureReg);

    // The seenMultipleCalleeObjects() sentinel means the callee check is
    // disabled (polymorphic site); otherwise require an exact callee match.
    loadPtr(cachedFunction, cachedFunctionReg);
    Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
    addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg));
    hasSeenMultipleCallees.link(this);

    JumpList slowCases;
    // New objects start with no out-of-line (butterfly) storage.
    auto butterfly = TrustedImmPtr(nullptr);
    emitAllocateJSObject(resultReg, JITAllocator::variable(), allocatorReg, structureReg, butterfly, scratchReg, slowCases);
    load8(Address(structureReg, Structure::inlineCapacityOffset()), scratchReg);
    emitInitializeInlineStorage(resultReg, scratchReg);
    addSlowCase(slowCases);
    emitStoreCell(bytecode.m_dst.offset(), resultReg);
}
1039 | |
// Compiles op_to_this's fast path: |this| must be a final-object cell whose
// structure matches the cached structure; any deviation takes the slow case.
// The operand is in-place (m_srcDst), so the fast path stores nothing.
void JIT::emit_op_to_this(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpToThis>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    StructureID* cachedStructureID = &metadata.m_cachedStructureID;
    int thisRegister = bytecode.m_srcDst.offset();

    emitLoad(thisRegister, regT3, regT2);

    addSlowCase(branchIfNotCell(regT3));
    addSlowCase(branchIfNotType(regT2, FinalObjectType));
    // Compare the cell's structure ID against the cached one (on 32-bit,
    // structure IDs are pointer-sized).
    loadPtr(Address(regT2, JSCell::structureIDOffset()), regT0);
    load32(cachedStructureID, regT2);
    addSlowCase(branchPtr(NotEqual, regT0, regT2));
}
1055 | |
// Compiles op_check_tdz: takes the slow case when the target register still
// holds the empty value, i.e. a let/const binding read inside its temporal
// dead zone.
void JIT::emit_op_check_tdz(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpCheckTdz>();
    // Only the tag is needed to recognize the empty value.
    emitLoadTag(bytecode.m_targetVirtualRegister.offset(), regT0);
    addSlowCase(branchIfEmpty(regT0));
}
1062 | |
// Compiles op_has_structure_property's fast path: if the base's structure
// still matches the enumerator's cached structure, the structure-range
// property is guaranteed present, so the result is simply true. Any mismatch
// (or a non-cell base) takes the slow case.
void JIT::emit_op_has_structure_property(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpHasStructureProperty>();
    int dst = bytecode.m_dst.offset();
    int base = bytecode.m_base.offset();
    int enumerator = bytecode.m_enumerator.offset();

    emitLoadPayload(base, regT0);
    emitJumpSlowCaseIfNotJSCell(base);

    emitLoadPayload(enumerator, regT1);

    load32(Address(regT0, JSCell::structureIDOffset()), regT0);
    addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));

    move(TrustedImm32(1), regT0);
    emitStoreBool(dst, regT0);
}
1081 | |
// Generates the specialized per-array-mode stub for has_indexed_property once
// the generic path has profiled an array mode, then repatches the original
// bad-type jump to enter the stub and the slow-path call site to use the
// generic operation.
void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
    const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr();

    PatchableJump badType;

    // FIXME: Add support for other types like TypedArrays and Arguments.
    // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
    // Emits the bounds/type checks for the chosen mode; on success the
    // property exists, so the result is true.
    JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType);
    move(TrustedImm32(1), regT0);
    Jump done = jump();

    LinkBuffer patchBuffer(*this, m_codeBlock);

    // Failures fall back to the original slow path; success rejoins the main
    // code right after the type check.
    patchBuffer.link(badType, byValInfo->slowPathTarget);
    patchBuffer.link(slowCases, byValInfo->slowPathTarget);

    patchBuffer.link(done, byValInfo->badTypeDoneTarget);

    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        m_codeBlock, patchBuffer, JITStubRoutinePtrTag,
        "Baseline has_indexed_property stub for %s, return point %p" , toCString(*m_codeBlock).data(), returnAddress.value());

    MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel<JITStubRoutinePtrTag>(byValInfo->stubRoutine->code().code()));
    MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(operationHasIndexedPropertyGeneric));
}
1108 | |
// Compiles op_has_indexed_property's fast path: checks base is a cell and the
// property is an int32, profiles the array shape, and emits an in-bounds
// load/check for the profiled array mode. Records ByValCompilationInfo so the
// bad-type jump can later be repatched to a specialized stub
// (see privateCompileHasIndexedProperty).
void JIT::emit_op_has_indexed_property(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpHasIndexedProperty>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int dst = bytecode.m_dst.offset();
    int base = bytecode.m_base.offset();
    int property = bytecode.m_property.offset();
    ArrayProfile* profile = &metadata.m_arrayProfile;
    ByValInfo* byValInfo = m_codeBlock->addByValInfo();

    emitLoadPayload(base, regT0);
    emitJumpSlowCaseIfNotJSCell(base);

    emitLoad(property, regT3, regT1);
    addSlowCase(branchIfNotInt32(regT3));

    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
    // number was signed since m_vectorLength is always less than intmax (since the total allocation
    // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
    // to 64-bits is necessary since it's used in the address calculation. We zero extend rather than sign
    // extending since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);

    // Record the observed indexing type, then isolate the indexing shape bits.
    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
    and32(TrustedImm32(IndexingShapeMask), regT2);

    JITArrayMode mode = chooseArrayMode(profile);
    PatchableJump badType;

    // FIXME: Add support for other types like TypedArrays and Arguments.
    // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
    JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType);
    // In bounds with a present element: the answer is true.
    move(TrustedImm32(1), regT0);

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

    emitStoreBool(dst, regT0);

    Label nextHotPath = label();

    // Remember the patch points so a specialized stub can be wired in later.
    m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, nextHotPath));
}
1155 | |
// Slow path for op_has_indexed_property: reloads both operands and calls the
// generic runtime operation. Also records the slow-path label and the call's
// return address in the ByValCompilationInfo so the fast path's stub can link
// back to them.
void JIT::emitSlow_op_has_indexed_property(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpHasIndexedProperty>();
    int dst = bytecode.m_dst.offset();
    int base = bytecode.m_base.offset();
    int property = bytecode.m_property.offset();
    ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;

    Label slowPath = label();

    emitLoad(base, regT1, regT0);
    emitLoad(property, regT3, regT2);
    Call call = callOperation(operationHasIndexedPropertyDefault, dst, JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2), byValInfo);

    // Pair this slow path with the fast path recorded in emit_op_has_indexed_property.
    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;
}
1176 | |
// Compiles op_get_direct_pname's fast path: directly loads the property at the
// enumerator-supplied index, provided the base's structure still matches the
// enumerator's cached structure. Handles both inline and out-of-line
// (butterfly) property storage.
void JIT::emit_op_get_direct_pname(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpGetDirectPname>();
    int dst = bytecode.m_dst.offset();
    int base = bytecode.m_base.offset();
    int index = bytecode.m_index.offset();
    int enumerator = bytecode.m_enumerator.offset();

    // Check that base is a cell
    emitLoadPayload(base, regT0);
    emitJumpSlowCaseIfNotJSCell(base);

    // Check the structure
    emitLoadPayload(enumerator, regT1);
    load32(Address(regT0, JSCell::structureIDOffset()), regT2);
    addSlowCase(branch32(NotEqual, regT2, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));

    // Compute the offset
    emitLoadPayload(index, regT2);
    // If index is less than the enumerator's cached inline storage, then it's an inline access
    Jump outOfLineAccess = branch32(AboveOrEqual, regT2, Address(regT1, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
    addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0);
    // Each property slot is an 8-byte EncodedJSValue: load tag and payload.
    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);

    Jump done = jump();

    // Otherwise it's out of line
    outOfLineAccess.link(this);
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
    // Convert the enumerator index to a (negative) out-of-line butterfly index.
    sub32(Address(regT1, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT2);
    neg32(regT2);
    int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
    load32(BaseIndex(regT0, regT2, TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
    load32(BaseIndex(regT0, regT2, TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);

    done.link(this);
    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    emitStore(dst, regT1, regT0);
}
1217 | |
// Compiles op_enumerator_structure_pname: dst = the index'th name from the
// enumerator's cached property-name vector while index is within the
// structure-property range, or null once the range is exhausted.
void JIT::emit_op_enumerator_structure_pname(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpEnumeratorStructurePname>();
    int dst = bytecode.m_dst.offset();
    int enumerator = bytecode.m_enumerator.offset();
    int index = bytecode.m_index.offset();

    emitLoadPayload(index, regT0);
    emitLoadPayload(enumerator, regT1);
    Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()));

    // Past the end: produce null (tag NullTag, payload 0).
    move(TrustedImm32(JSValue::NullTag), regT2);
    move(TrustedImm32(0), regT0);

    Jump done = jump();
    inBounds.link(this);

    // In bounds: fetch the cached name (a cell) from the vector.
    loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
    loadPtr(BaseIndex(regT1, regT0, timesPtr()), regT0);
    move(TrustedImm32(JSValue::CellTag), regT2);

    done.link(this);
    emitStore(dst, regT2, regT0);
}
1242 | |
// Compiles op_enumerator_generic_pname: identical shape to
// emit_op_enumerator_structure_pname, but bounded by the enumerator's generic
// property range instead of the structure range.
void JIT::emit_op_enumerator_generic_pname(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpEnumeratorGenericPname>();
    int dst = bytecode.m_dst.offset();
    int enumerator = bytecode.m_enumerator.offset();
    int index = bytecode.m_index.offset();

    emitLoadPayload(index, regT0);
    emitLoadPayload(enumerator, regT1);
    Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));

    // Past the end: produce null (tag NullTag, payload 0).
    move(TrustedImm32(JSValue::NullTag), regT2);
    move(TrustedImm32(0), regT0);

    Jump done = jump();
    inBounds.link(this);

    // In bounds: fetch the cached name (a cell) from the vector.
    loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
    loadPtr(BaseIndex(regT1, regT0, timesPtr()), regT0);
    move(TrustedImm32(JSValue::CellTag), regT2);

    done.link(this);
    emitStore(dst, regT2, regT0);
}
1267 | |
// Compiles op_profile_type: appends (value, structureID, typeLocation) to the
// VM's type profiler log, skipping the write when the value's type matches the
// last type seen at this location. Flushes the log via a runtime call when the
// entry pointer reaches the end of the log buffer.
void JIT::emit_op_profile_type(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpProfileType>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    TypeLocation* cachedTypeLocation = metadata.m_typeLocation;
    int valueToProfile = bytecode.m_targetVirtualRegister.offset();

    // Load payload in T0. Load tag in T3.
    emitLoadPayload(valueToProfile, regT0);
    emitLoadTag(valueToProfile, regT3);

    JumpList jumpToEnd;

    // Empty values (TDZ reads) are never logged.
    jumpToEnd.append(branchIfEmpty(regT3));

    // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
    // These typechecks are inlined to match those of the 32-bit JSValue type checks.
    if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
        jumpToEnd.append(branchIfUndefined(regT3));
    else if (cachedTypeLocation->m_lastSeenType == TypeNull)
        jumpToEnd.append(branchIfNull(regT3));
    else if (cachedTypeLocation->m_lastSeenType == TypeBoolean)
        jumpToEnd.append(branchIfBoolean(regT3, InvalidGPRReg));
    else if (cachedTypeLocation->m_lastSeenType == TypeAnyInt)
        jumpToEnd.append(branchIfInt32(regT3));
    else if (cachedTypeLocation->m_lastSeenType == TypeNumber) {
        jumpToEnd.append(branchIfNumber(JSValueRegs(regT3, regT0), regT1));
    } else if (cachedTypeLocation->m_lastSeenType == TypeString) {
        Jump isNotCell = branchIfNotCell(regT3);
        jumpToEnd.append(branchIfString(regT0));
        isNotCell.link(this);
    }

    // Load the type profiling log into T2.
    TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog();
    move(TrustedImmPtr(cachedTypeProfilerLog), regT2);

    // Load the next log entry into T1.
    loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1);

    // Store the JSValue onto the log entry.
    store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
    store32(regT3, Address(regT1, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));

    // Store the structureID of the cell if argument is a cell, otherwise, store 0 on the log entry.
    Jump notCell = branchIfNotCell(regT3);
    load32(Address(regT0, JSCell::structureIDOffset()), regT0);
    store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
    Jump skipNotCell = jump();
    notCell.link(this);
    store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
    skipNotCell.link(this);

    // Store the typeLocation on the log entry.
    move(TrustedImmPtr(cachedTypeLocation), regT0);
    store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset()));

    // Increment the current log entry.
    addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1);
    store32(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset()));
    jumpToEnd.append(branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr())));
    // Clear the log if we're at the end of the log.
    callOperation(operationProcessTypeProfilerLog);

    jumpToEnd.link(this);
}
1334 | |
// Compiles op_log_shadow_chicken_prologue: acquires a packet from the
// ShadowChicken log and records this frame's prologue (including the callee
// scope) for the debugger's shadow stack.
void JIT::emit_op_log_shadow_chicken_prologue(const Instruction* currentInstruction)
{
    RELEASE_ASSERT(vm()->shadowChicken());
    updateTopCallFrame();
    static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if this is true." );
    auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>();
    GPRReg shadowPacketReg = regT0;
    GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
    GPRReg scratch2Reg = regT2;
    ensureShadowChickenPacket(*vm(), shadowPacketReg, scratch1Reg, scratch2Reg);

    // After packet acquisition the non-argument constraint no longer applies;
    // reuse regT4 as the scratch for writing the packet.
    scratch1Reg = regT4;
    emitLoadPayload(bytecode.m_scope.offset(), regT3);
    logShadowChickenProloguePacket(shadowPacketReg, scratch1Reg, regT3);
}
1350 | |
// Compiles op_log_shadow_chicken_tail: acquires a packet from the
// ShadowChicken log and records a tail-call event (|this|, scope, and call
// site) so the debugger can reconstruct tail-deleted frames.
void JIT::emit_op_log_shadow_chicken_tail(const Instruction* currentInstruction)
{
    RELEASE_ASSERT(vm()->shadowChicken());
    updateTopCallFrame();
    static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if this is true." );
    auto bytecode = currentInstruction->as<OpLogShadowChickenTail>();
    GPRReg shadowPacketReg = regT0;
    GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
    GPRReg scratch2Reg = regT2;
    ensureShadowChickenPacket(*vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
    // |this| as a full JSValue: tag -> regT1, payload -> regT2.
    emitLoadPayload(bytecode.m_thisValue.offset(), regT2);
    emitLoadTag(bytecode.m_thisValue.offset(), regT1);
    JSValueRegs thisRegs(regT1, regT2);
    emitLoadPayload(bytecode.m_scope.offset(), regT3);
    logShadowChickenTailPacket(shadowPacketReg, thisRegs, regT3, m_codeBlock, CallSiteIndex(currentInstruction));
}
1367 | |
1368 | } // namespace JSC |
1369 | |
1370 | #endif // USE(JSVALUE32_64) |
1371 | #endif // ENABLE(JIT) |
1372 | |