1/*
2 * Copyright (C) 2009-2019 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 Patrick Gansterer <[email protected]>
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include "config.h"
28#if ENABLE(JIT)
29#include "JIT.h"
30
31#include "BasicBlockLocation.h"
32#include "BytecodeStructs.h"
33#include "Exception.h"
34#include "Heap.h"
35#include "InterpreterInlines.h"
36#include "JITInlines.h"
37#include "JSArray.h"
38#include "JSCast.h"
39#include "JSFunction.h"
40#include "JSPropertyNameEnumerator.h"
41#include "LinkBuffer.h"
42#include "MaxFrameExtentForSlowPathCall.h"
43#include "OpcodeInlines.h"
44#include "SlowPathCall.h"
45#include "SuperSampler.h"
46#include "ThunkGenerators.h"
47#include "TypeLocation.h"
48#include "TypeProfilerLog.h"
49#include "VirtualRegister.h"
50#include "Watchdog.h"
51
52namespace JSC {
53
54#if USE(JSVALUE64)
55
// op_mov: copy one virtual register to another.
// Constant sources are materialized as immediate stores; everything else
// round-trips through regT0.
void JIT::emit_op_mov(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpMov>();
    int dst = bytecode.m_dst.offset();
    int src = bytecode.m_src.offset();

    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        if (!value.isNumber())
            // Non-number constants (cells etc.) are emitted as trusted,
            // non-blindable immediates.
            store64(TrustedImm64(JSValue::encode(value)), addressFor(dst));
        else
            // Number constants use Imm64 — presumably so JIT constant
            // blinding can apply to attacker-controllable values; confirm
            // against AbstractMacroAssembler's Imm64 semantics.
            store64(Imm64(JSValue::encode(value)), addressFor(dst));
        return;
    }

    load64(addressFor(src), regT0);
    store64(regT0, addressFor(dst));
}
74
75
// op_end: terminate execution, returning the given virtual register's value
// to the caller. Callee saves must be restored before the epilogue tears the
// frame down.
void JIT::emit_op_end(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpEnd>();
    RELEASE_ASSERT(returnValueGPR != callFrameRegister);
    emitGetVirtualRegister(bytecode.m_value.offset(), returnValueGPR);
    emitRestoreCalleeSaves();
    emitFunctionEpilogue();
    ret();
}
85
86void JIT::emit_op_jmp(const Instruction* currentInstruction)
87{
88 auto bytecode = currentInstruction->as<OpJmp>();
89 unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
90 addJump(jump(), target);
91}
92
// op_new_object: inline-allocate a JSFinalObject with the structure cached in
// the allocation profile. If no allocator is available for the required size,
// everything goes to the slow path (operationNewObject).
void JIT::emit_op_new_object(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpNewObject>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    Structure* structure = metadata.m_objectAllocationProfile.structure();
    size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
    Allocator allocator = allocatorForNonVirtualConcurrently<JSFinalObject>(*m_vm, allocationSize, AllocatorForMode::AllocatorIfExists);

    RegisterID resultReg = regT0;
    RegisterID allocatorReg = regT1;
    RegisterID scratchReg = regT2;

    if (!allocator)
        // No suitable allocator exists yet; unconditionally take the slow path.
        // The slow path performs the store to dst itself.
        addSlowCase(jump());
    else {
        JumpList slowCases;
        // A final object starts with no butterfly (out-of-line storage).
        auto butterfly = TrustedImmPtr(nullptr);
        emitAllocateJSObject(resultReg, JITAllocator::constant(allocator), allocatorReg, TrustedImmPtr(structure), butterfly, scratchReg, slowCases);
        emitInitializeInlineStorage(resultReg, structure->inlineCapacity());
        addSlowCase(slowCases);
        emitPutVirtualRegister(bytecode.m_dst.offset());
    }
}
116
// Slow path for op_new_object: allocate through the runtime and store the
// resulting cell into dst.
void JIT::emitSlow_op_new_object(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpNewObject>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int dst = bytecode.m_dst.offset();
    Structure* structure = metadata.m_objectAllocationProfile.structure();
    callOperation(operationNewObject, structure);
    emitStoreCell(dst, returnValueGPR);
}
128
// op_overrides_has_instance: dst = whether `constructor instanceof` would use
// a non-default Symbol.hasInstance. Result is a boxed boolean.
void JIT::emit_op_overrides_has_instance(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpOverridesHasInstance>();
    int dst = bytecode.m_dst.offset();
    int constructor = bytecode.m_constructor.offset();
    int hasInstanceValue = bytecode.m_hasInstanceValue.offset();

    emitGetVirtualRegister(hasInstanceValue, regT0);

    // We don't jump if we know what Symbol.hasInstance would do.
    Jump customhasInstanceValue = branchPtr(NotEqual, regT0, TrustedImmPtr(m_codeBlock->globalObject()->functionProtoHasInstanceSymbolFunction()));

    emitGetVirtualRegister(constructor, regT0);

    // Check that constructor 'ImplementsDefaultHasInstance' i.e. the object is not a C-API user nor a bound function.
    // test8(Zero, ...) leaves 1 in regT0 when the flag is clear, i.e. when the
    // default hasInstance is overridden.
    test8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance), regT0);
    boxBoolean(regT0, JSValueRegs { regT0 });
    Jump done = jump();

    // A custom hasInstance value always counts as overriding.
    customhasInstanceValue.link(this);
    move(TrustedImm32(ValueTrue), regT0);

    done.link(this);
    emitPutVirtualRegister(dst);
}
154
// op_instanceof: IC-cached prototype-chain walk. Non-cell value or prototype
// bails to the slow path; otherwise a JITInstanceOfGenerator emits the
// inline-cache fast path.
void JIT::emit_op_instanceof(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpInstanceof>();
    int dst = bytecode.m_dst.offset();
    int value = bytecode.m_value.offset();
    int proto = bytecode.m_prototype.offset();

    // Load the operands (baseVal, proto, and value respectively) into registers.
    // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
    emitGetVirtualRegister(value, regT2);
    emitGetVirtualRegister(proto, regT1);

    // Check that proto are cells. baseVal must be a cell - this is checked by the get_by_id for Symbol.hasInstance.
    emitJumpSlowCaseIfNotJSCell(regT2, value);
    emitJumpSlowCaseIfNotJSCell(regT1, proto);

    // Register assignment here must stay in sync with emitSlow_op_instanceof,
    // which reuses regT2/regT1 directly.
    JITInstanceOfGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset),
        RegisterSet::stubUnavailableRegisters(),
        regT0, // result
        regT2, // value
        regT1, // proto
        regT3, regT4); // scratch
    gen.generateFastPath(*this);
    m_instanceOfs.append(gen);

    emitPutVirtualRegister(dst);
}
183
// Slow path for op_instanceof: call the optimizing runtime operation with the
// IC's stub info. regT2/regT1 still hold value/proto from the fast path.
void JIT::emitSlow_op_instanceof(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpInstanceof>();
    int resultVReg = bytecode.m_dst.offset();

    JITInstanceOfGenerator& gen = m_instanceOfs[m_instanceOfIndex++];

    Label coldPathBegin = label();
    Call call = callOperation(operationInstanceOfOptimize, resultVReg, gen.stubInfo(), regT2, regT1);
    gen.reportSlowPathCall(coldPathBegin, call);
}
197
// op_instanceof_custom: no fast path is emitted at all.
void JIT::emit_op_instanceof_custom(const Instruction*)
{
    // This always goes to slow path since we expect it to be rare.
    addSlowCase(jump());
}
203
// op_is_empty: dst = (operand == the empty JSValue), as a boxed boolean.
void JIT::emit_op_is_empty(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpIsEmpty>();
    int dst = bytecode.m_dst.offset();
    int value = bytecode.m_operand.offset();

    emitGetVirtualRegister(value, regT0);
    // JSValue() encodes the empty value; comparing the full 64 bits against it.
    compare64(Equal, regT0, TrustedImm32(JSValue::encode(JSValue())), regT0);

    boxBoolean(regT0, JSValueRegs { regT0 });
    emitPutVirtualRegister(dst);
}
216
// op_is_undefined: dst = (operand === undefined), honoring objects that
// masquerade as undefined (document.all-style) — those compare equal to
// undefined only when observed from their own global object.
void JIT::emit_op_is_undefined(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpIsUndefined>();
    int dst = bytecode.m_dst.offset();
    int value = bytecode.m_operand.offset();

    emitGetVirtualRegister(value, regT0);
    Jump isCell = branchIfCell(regT0);

    // Non-cell: a direct comparison with the undefined encoding suffices.
    compare64(Equal, regT0, TrustedImm32(ValueUndefined), regT0);
    Jump done = jump();

    isCell.link(this);
    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
    // Ordinary cells are never undefined.
    move(TrustedImm32(0), regT0);
    Jump notMasqueradesAsUndefined = jump();

    // Masquerader: it is "undefined" only if its structure's global object is
    // this code block's global object.
    isMasqueradesAsUndefined.link(this);
    emitLoadStructure(*vm(), regT0, regT1, regT2);
    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
    loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
    comparePtr(Equal, regT0, regT1, regT0);

    notMasqueradesAsUndefined.link(this);
    done.link(this);
    boxBoolean(regT0, JSValueRegs { regT0 });
    emitPutVirtualRegister(dst);
}
245
// op_is_undefined_or_null: dst = (operand is undefined or null).
// Masking off TagBitUndefined folds undefined onto null, so one comparison
// covers both. Note: no MasqueradesAsUndefined handling here, matching the
// semantics of the ?? / ?. nullish checks.
void JIT::emit_op_is_undefined_or_null(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpIsUndefinedOrNull>();
    int dst = bytecode.m_dst.offset();
    int value = bytecode.m_operand.offset();

    emitGetVirtualRegister(value, regT0);

    and64(TrustedImm32(~TagBitUndefined), regT0);
    compare64(Equal, regT0, TrustedImm32(ValueNull), regT0);

    boxBoolean(regT0, JSValueRegs { regT0 });
    emitPutVirtualRegister(dst);
}
260
// op_is_boolean: dst = (operand is true or false), as a boxed boolean.
void JIT::emit_op_is_boolean(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpIsBoolean>();
    int dst = bytecode.m_dst.offset();
    int value = bytecode.m_operand.offset();

    emitGetVirtualRegister(value, regT0);
    // XOR with ValueFalse maps false -> 0 and true -> 1; any other encoding
    // leaves bits set above the low bit, which the masked test detects.
    xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
    test64(Zero, regT0, TrustedImm32(static_cast<int32_t>(~1)), regT0);
    boxBoolean(regT0, JSValueRegs { regT0 });
    emitPutVirtualRegister(dst);
}
273
// op_is_number: dst = (operand is any number). In the JSVALUE64 encoding any
// value with bits of tagTypeNumberRegister set is a number (int32 or double).
void JIT::emit_op_is_number(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpIsNumber>();
    int dst = bytecode.m_dst.offset();
    int value = bytecode.m_operand.offset();

    emitGetVirtualRegister(value, regT0);
    test64(NonZero, regT0, tagTypeNumberRegister, regT0);
    boxBoolean(regT0, JSValueRegs { regT0 });
    emitPutVirtualRegister(dst);
}
285
// op_is_cell_with_type: dst = (operand is a cell whose JSType equals the
// type baked into the bytecode). Non-cells are simply false.
void JIT::emit_op_is_cell_with_type(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpIsCellWithType>();
    int dst = bytecode.m_dst.offset();
    int value = bytecode.m_operand.offset();
    int type = bytecode.m_type;

    emitGetVirtualRegister(value, regT0);
    Jump isNotCell = branchIfNotCell(regT0);

    compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(type), regT0);
    boxBoolean(regT0, JSValueRegs { regT0 });
    Jump done = jump();

    isNotCell.link(this);
    move(TrustedImm32(ValueFalse), regT0);

    done.link(this);
    emitPutVirtualRegister(dst);
}
306
// op_is_object: dst = (operand is a cell with JSType >= ObjectType).
// Relies on the JSType enum placing all object types at/above ObjectType.
void JIT::emit_op_is_object(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpIsObject>();
    int dst = bytecode.m_dst.offset();
    int value = bytecode.m_operand.offset();

    emitGetVirtualRegister(value, regT0);
    Jump isNotCell = branchIfNotCell(regT0);

    compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0);
    boxBoolean(regT0, JSValueRegs { regT0 });
    Jump done = jump();

    isNotCell.link(this);
    move(TrustedImm32(ValueFalse), regT0);

    done.link(this);
    emitPutVirtualRegister(dst);
}
326
// op_ret: return bytecode.m_value from the current function. Callee saves are
// restored and the frame is torn down before ret().
void JIT::emit_op_ret(const Instruction* currentInstruction)
{
    ASSERT(callFrameRegister != regT1);
    ASSERT(regT1 != returnValueGPR);
    ASSERT(returnValueGPR != callFrameRegister);

    // Return the result in returnValueGPR.
    auto bytecode = currentInstruction->as<OpRet>();
    emitGetVirtualRegister(bytecode.m_value.offset(), returnValueGPR);

    checkStackPointerAlignment();
    emitRestoreCalleeSaves();
    emitFunctionEpilogue();
    ret();
}
342
// op_to_primitive: non-object values are already primitive and pass through
// unchanged; objects go to the slow path (which runs the ToPrimitive protocol).
void JIT::emit_op_to_primitive(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpToPrimitive>();
    int dst = bytecode.m_dst.offset();
    int src = bytecode.m_src.offset();

    emitGetVirtualRegister(src, regT0);

    Jump isImm = branchIfNotCell(regT0);
    addSlowCase(branchIfObject(regT0));
    isImm.link(this);

    // src is already primitive here; avoid a redundant store when src == dst.
    if (dst != src)
        emitPutVirtualRegister(dst);

}
359
360void JIT::emit_op_set_function_name(const Instruction* currentInstruction)
361{
362 auto bytecode = currentInstruction->as<OpSetFunctionName>();
363 emitGetVirtualRegister(bytecode.m_function.offset(), regT0);
364 emitGetVirtualRegister(bytecode.m_name.offset(), regT1);
365 callOperation(operationSetFunctionName, regT0, regT1);
366}
367
// op_not: logical negation of a boolean operand. Non-boolean inputs are
// detected by the masked test and sent to the slow path.
void JIT::emit_op_not(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpNot>();
    emitGetVirtualRegister(bytecode.m_operand.offset(), regT0);

    // Invert against JSValue(false); if the value was tagged as a boolean, then all bits will be
    // clear other than the low bit (which will be 0 or 1 for false or true inputs respectively).
    // Then invert against JSValue(true), which will add the tag back in, and flip the low bit.
    xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
    addSlowCase(branchTestPtr(NonZero, regT0, TrustedImm32(static_cast<int32_t>(~1))));
    xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), regT0);

    emitPutVirtualRegister(bytecode.m_dst.offset());
}
382
// op_jfalse: branch to target when the condition is falsey (full JS ToBoolean
// semantics, including MasqueradesAsUndefined objects).
void JIT::emit_op_jfalse(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpJfalse>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);

    GPRReg value = regT0;
    GPRReg scratch1 = regT1;
    GPRReg scratch2 = regT2;
    bool shouldCheckMasqueradesAsUndefined = true;

    emitGetVirtualRegister(bytecode.m_condition.offset(), value);
    addJump(branchIfFalsey(*vm(), JSValueRegs(value), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target);
}
396
397void JIT::emit_op_jeq_null(const Instruction* currentInstruction)
398{
399 auto bytecode = currentInstruction->as<OpJeqNull>();
400 int src = bytecode.m_value.offset();
401 unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
402
403 emitGetVirtualRegister(src, regT0);
404 Jump isImmediate = branchIfNotCell(regT0);
405
406 // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
407 Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
408 emitLoadStructure(*vm(), regT0, regT2, regT1);
409 move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
410 addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
411 Jump masqueradesGlobalObjectIsForeign = jump();
412
413 // Now handle the immediate cases - undefined & null
414 isImmediate.link(this);
415 and64(TrustedImm32(~TagBitUndefined), regT0);
416 addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNull()))), target);
417
418 isNotMasqueradesAsUndefined.link(this);
419 masqueradesGlobalObjectIsForeign.link(this);
420};
// op_jneq_null: branch to target when the value does NOT loosely equal null.
// Mirror image of emit_op_jeq_null above.
void JIT::emit_op_jneq_null(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpJneqNull>();
    int src = bytecode.m_value.offset();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);

    emitGetVirtualRegister(src, regT0);
    Jump isImmediate = branchIfNotCell(regT0);

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    // A non-masquerading cell is never loosely null -> jump taken.
    addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
    emitLoadStructure(*vm(), regT0, regT2, regT1);
    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
    // A masquerader from a foreign global object also does not equal null.
    addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);
    and64(TrustedImm32(~TagBitUndefined), regT0);
    addJump(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsNull()))), target);

    wasNotImmediate.link(this);
}
444
// op_jneq_ptr: branch to target when the value is not the given special
// pointer. Records the fact that the jump was ever taken in metadata
// (m_hasJumped) so later tiers know the comparison is not always equal.
void JIT::emit_op_jneq_ptr(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpJneqPtr>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int src = bytecode.m_value.offset();
    Special::Pointer ptr = bytecode.m_specialPointer;
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);

    emitGetVirtualRegister(src, regT0);
    CCallHelpers::Jump equal = branchPtr(Equal, regT0, TrustedImmPtr(actualPointerFor(m_codeBlock, ptr)));
    store8(TrustedImm32(1), &metadata.m_hasJumped);
    addJump(jump(), target);
    equal.link(this);
}
459
// op_eq: fast path only handles int32 == int32; anything else goes to the
// slow path via emitJumpSlowCaseIfNotInt.
void JIT::emit_op_eq(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpEq>();
    emitGetVirtualRegisters(bytecode.m_lhs.offset(), regT0, bytecode.m_rhs.offset(), regT1);
    emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
    compare32(Equal, regT1, regT0, regT0);
    boxBoolean(regT0, JSValueRegs { regT0 });
    emitPutVirtualRegister(bytecode.m_dst.offset());
}
469
// op_jeq: fused compare-and-branch form of op_eq; int32-only fast path.
void JIT::emit_op_jeq(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpJeq>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
    emitGetVirtualRegisters(bytecode.m_lhs.offset(), regT0, bytecode.m_rhs.offset(), regT1);
    emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
    addJump(branch32(Equal, regT0, regT1), target);
}
478
// op_jtrue: branch to target when the condition is truthy (full JS ToBoolean
// semantics, including MasqueradesAsUndefined objects).
void JIT::emit_op_jtrue(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpJtrue>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);

    GPRReg value = regT0;
    GPRReg scratch1 = regT1;
    GPRReg scratch2 = regT2;
    bool shouldCheckMasqueradesAsUndefined = true;
    emitGetVirtualRegister(bytecode.m_condition.offset(), value);
    addJump(branchIfTruthy(*vm(), JSValueRegs(value), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target);
}
491
// op_neq: int32-only fast path for loose inequality; see emit_op_eq.
void JIT::emit_op_neq(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpNeq>();
    emitGetVirtualRegisters(bytecode.m_lhs.offset(), regT0, bytecode.m_rhs.offset(), regT1);
    emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
    compare32(NotEqual, regT1, regT0, regT0);
    boxBoolean(regT0, JSValueRegs { regT0 });

    emitPutVirtualRegister(bytecode.m_dst.offset());
}
502
// op_jneq: fused compare-and-branch form of op_neq; int32-only fast path.
void JIT::emit_op_jneq(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpJneq>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
    emitGetVirtualRegisters(bytecode.m_lhs.offset(), regT0, bytecode.m_rhs.offset(), regT1);
    emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
    addJump(branch32(NotEqual, regT0, regT1), target);
}
511
// op_throw: hand the thrown value to the runtime and transfer control to the
// exception handler; this never returns to the fast path. Callee saves are
// copied out first so unwinding can restore them.
void JIT::emit_op_throw(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpThrow>();
    ASSERT(regT0 == returnValueGPR);
    copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);
    emitGetVirtualRegister(bytecode.m_value.offset(), regT0);
    // No exception check: throwing is the expected outcome here.
    callOperationNoExceptionCheck(operationThrow, regT0);
    jumpToExceptionHandler(*vm());
}
521
// Shared fast path for op_stricteq / op_nstricteq. Handles only
// int32-vs-int32 and other non-cell, non-double encodings where a raw 64-bit
// compare is exact; cells (strings need content comparison) and doubles
// (NaN, +0/-0) are punted to the slow path.
template<typename Op>
void JIT::compileOpStrictEq(const Instruction* currentInstruction, CompileOpStrictEqType type)
{
    auto bytecode = currentInstruction->as<Op>();
    int dst = bytecode.m_dst.offset();
    int src1 = bytecode.m_lhs.offset();
    int src2 = bytecode.m_rhs.offset();

    emitGetVirtualRegisters(src1, regT0, src2, regT1);

    // Jump slow if both are cells (to cover strings).
    move(regT0, regT2);
    or64(regT1, regT2);
    addSlowCase(branchIfCell(regT2));

    // Jump slow if either is a double. First test if it's an integer, which is fine, and then test
    // if it's a double.
    Jump leftOK = branchIfInt32(regT0);
    addSlowCase(branchIfNumber(regT0));
    leftOK.link(this);
    Jump rightOK = branchIfInt32(regT1);
    addSlowCase(branchIfNumber(regT1));
    rightOK.link(this);

    if (type == CompileOpStrictEqType::StrictEq)
        compare64(Equal, regT1, regT0, regT0);
    else
        compare64(NotEqual, regT1, regT0, regT0);
    boxBoolean(regT0, JSValueRegs { regT0 });

    emitPutVirtualRegister(dst);
}
554
// op_stricteq: thin wrapper over the shared strict-equality fast path.
void JIT::emit_op_stricteq(const Instruction* currentInstruction)
{
    compileOpStrictEq<OpStricteq>(currentInstruction, CompileOpStrictEqType::StrictEq);
}
559
// op_nstricteq: thin wrapper over the shared strict-inequality fast path.
void JIT::emit_op_nstricteq(const Instruction* currentInstruction)
{
    compileOpStrictEq<OpNstricteq>(currentInstruction, CompileOpStrictEqType::NStrictEq);
}
564
// Shared fast path for op_jstricteq / op_jnstricteq: same filtering as
// compileOpStrictEq (cells and doubles go slow) but fused into a branch
// instead of producing a boxed boolean.
template<typename Op>
void JIT::compileOpStrictEqJump(const Instruction* currentInstruction, CompileOpStrictEqType type)
{
    auto bytecode = currentInstruction->as<Op>();
    int target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
    int src1 = bytecode.m_lhs.offset();
    int src2 = bytecode.m_rhs.offset();

    emitGetVirtualRegisters(src1, regT0, src2, regT1);

    // Jump slow if both are cells (to cover strings).
    move(regT0, regT2);
    or64(regT1, regT2);
    addSlowCase(branchIfCell(regT2));

    // Jump slow if either is a double. First test if it's an integer, which is fine, and then test
    // if it's a double.
    Jump leftOK = branchIfInt32(regT0);
    addSlowCase(branchIfNumber(regT0));
    leftOK.link(this);
    Jump rightOK = branchIfInt32(regT1);
    addSlowCase(branchIfNumber(regT1));
    rightOK.link(this);

    if (type == CompileOpStrictEqType::StrictEq)
        addJump(branch64(Equal, regT1, regT0), target);
    else
        addJump(branch64(NotEqual, regT1, regT0), target);
}
594
// op_jstricteq: thin wrapper over the fused strict-equality branch path.
void JIT::emit_op_jstricteq(const Instruction* currentInstruction)
{
    compileOpStrictEqJump<OpJstricteq>(currentInstruction, CompileOpStrictEqType::StrictEq);
}
599
// op_jnstricteq: thin wrapper over the fused strict-inequality branch path.
void JIT::emit_op_jnstricteq(const Instruction* currentInstruction)
{
    compileOpStrictEqJump<OpJnstricteq>(currentInstruction, CompileOpStrictEqType::NStrictEq);
}
604
// Slow path for op_jstricteq: ask the runtime, branch back to the hot path's
// target when the operation reports equality. regT0/regT1 still hold the
// operands from the fast path.
void JIT::emitSlow_op_jstricteq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpJstricteq>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
    callOperation(operationCompareStrictEq, regT0, regT1);
    emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target);
}
614
// Slow path for op_jnstricteq: same as emitSlow_op_jstricteq but branches on
// the operation reporting inequality (result zero).
void JIT::emitSlow_op_jnstricteq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpJnstricteq>();
    unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
    callOperation(operationCompareStrictEq, regT0, regT1);
    emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target);
}
624
// op_to_number: values that are already numbers pass through; everything else
// goes slow. The result is value-profiled for the optimizing tiers.
void JIT::emit_op_to_number(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpToNumber>();
    int dstVReg = bytecode.m_dst.offset();
    int srcVReg = bytecode.m_operand.offset();
    emitGetVirtualRegister(srcVReg, regT0);

    addSlowCase(branchIfNotNumber(regT0));

    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    // src is already a number; avoid a redundant store when src == dst.
    if (srcVReg != dstVReg)
        emitPutVirtualRegister(dstVReg);
}
638
// op_to_string: only values that are already JSStrings stay on the fast path;
// any other value (non-cell or non-string cell) goes slow for conversion.
void JIT::emit_op_to_string(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpToString>();
    int srcVReg = bytecode.m_operand.offset();
    emitGetVirtualRegister(srcVReg, regT0);

    addSlowCase(branchIfNotCell(regT0));
    addSlowCase(branchIfNotString(regT0));

    emitPutVirtualRegister(bytecode.m_dst.offset());
}
650
// op_to_object: values that are already objects pass through (with value
// profiling); non-cells and non-object cells go slow for wrapping/throwing.
void JIT::emit_op_to_object(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpToObject>();
    int dstVReg = bytecode.m_dst.offset();
    int srcVReg = bytecode.m_operand.offset();
    emitGetVirtualRegister(srcVReg, regT0);

    addSlowCase(branchIfNotCell(regT0));
    addSlowCase(branchIfNotObject(regT0));

    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    // src is already an object; avoid a redundant store when src == dst.
    if (srcVReg != dstVReg)
        emitPutVirtualRegister(dstVReg);
}
665
// op_catch: entry point of a catch handler, reached via the exception
// machinery rather than fall-through. Re-establishes the frame and stack
// pointer, re-raises uncatchable exceptions (e.g. termination), extracts the
// Exception object and its thrown value into virtual registers, and — when
// the DFG is enabled — attempts OSR entry into optimized catch code.
void JIT::emit_op_catch(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpCatch>();

    restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);

    // The unwinder parked the handler's call frame in VM::callFrameForCatch;
    // adopt it and clear the slot.
    move(TrustedImmPtr(m_vm), regT3);
    load64(Address(regT3, VM::callFrameForCatchOffset()), callFrameRegister);
    storePtr(TrustedImmPtr(nullptr), Address(regT3, VM::callFrameForCatchOffset()));

    // Recompute the stack pointer from the restored call frame.
    addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister);

    callOperationNoExceptionCheck(operationCheckIfExceptionIsUncatchableAndNotifyProfiler);
    Jump isCatchableException = branchTest32(Zero, returnValueGPR);
    // Uncatchable: keep propagating to the next handler.
    jumpToExceptionHandler(*vm());
    isCatchableException.link(this);

    // Take the pending Exception* out of the VM (clearing it) and store it.
    move(TrustedImmPtr(m_vm), regT3);
    load64(Address(regT3, VM::exceptionOffset()), regT0);
    store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, VM::exceptionOffset()));
    emitPutVirtualRegister(bytecode.m_exception.offset());

    // The thrown JSValue lives inside the Exception object.
    load64(Address(regT0, Exception::valueOffset()), regT0);
    emitPutVirtualRegister(bytecode.m_thrownValue.offset());

#if ENABLE(DFG_JIT)
    // FIXME: consider inline caching the process of doing OSR entry, including
    // argument type proofs, storing locals to the buffer, etc
    // https://bugs.webkit.org/show_bug.cgi?id=175598

    auto& metadata = bytecode.metadata(m_codeBlock);
    ValueProfileAndOperandBuffer* buffer = metadata.m_buffer;
    // When a profiling buffer exists, this JIT profiles inline below, so the
    // plain OSR-entry operation suffices; otherwise the operation that also
    // value-profiles is used (only meaningful when profiling is enabled).
    if (buffer || !shouldEmitProfiling())
        callOperation(operationTryOSREnterAtCatch, m_bytecodeOffset);
    else
        callOperation(operationTryOSREnterAtCatchAndValueProfile, m_bytecodeOffset);
    auto skipOSREntry = branchTestPtr(Zero, returnValueGPR);
    // Non-null result is the optimized entry point; tail-jump into it.
    emitRestoreCalleeSaves();
    jump(returnValueGPR, ExceptionHandlerPtrTag);
    skipOSREntry.link(this);
    if (buffer && shouldEmitProfiling()) {
        buffer->forEach([&] (ValueProfileAndOperand& profile) {
            JSValueRegs regs(regT0);
            emitGetVirtualRegister(profile.m_operand, regs);
            emitValueProfilingSite(static_cast<ValueProfile&>(profile));
        });
    }
#endif // ENABLE(DFG_JIT)
}
715
// op_identity_with_profile: profiling hint for the optimizing tiers only; the
// baseline JIT emits no code for it.
void JIT::emit_op_identity_with_profile(const Instruction*)
{
    // We don't need to do anything here...
}
720
721void JIT::emit_op_get_parent_scope(const Instruction* currentInstruction)
722{
723 auto bytecode = currentInstruction->as<OpGetParentScope>();
724 int currentScope = bytecode.m_scope.offset();
725 emitGetVirtualRegister(currentScope, regT0);
726 loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
727 emitStoreCell(bytecode.m_dst.offset(), regT0);
728}
729
// op_switch_imm: dispatch through an immediate-keyed jump table. The key-type
// check and table lookup happen in the runtime operation, which returns the
// machine-code destination to jump to.
void JIT::emit_op_switch_imm(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpSwitchImm>();
    size_t tableIndex = bytecode.m_tableIndex;
    unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset);
    unsigned scrutinee = bytecode.m_scrutinee.offset();

    // create jump table for switch destinations, track this switch statement.
    SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
    jumpTable->ensureCTITable();

    emitGetVirtualRegister(scrutinee, regT0);
    callOperation(operationSwitchImmWithUnknownKeyType, regT0, tableIndex);
    jump(returnValueGPR, JSSwitchPtrTag);
}
746
// op_switch_char: like op_switch_imm, but keyed on single-character strings;
// the runtime operation resolves the destination.
void JIT::emit_op_switch_char(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpSwitchChar>();
    size_t tableIndex = bytecode.m_tableIndex;
    unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset);
    unsigned scrutinee = bytecode.m_scrutinee.offset();

    // create jump table for switch destinations, track this switch statement.
    SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
    jumpTable->ensureCTITable();

    emitGetVirtualRegister(scrutinee, regT0);
    callOperation(operationSwitchCharWithUnknownKeyType, regT0, tableIndex);
    jump(returnValueGPR, JSSwitchPtrTag);
}
763
// op_switch_string: string-keyed switch; uses a StringJumpTable (no CTI table
// pre-build needed here) and resolves the destination in the runtime.
void JIT::emit_op_switch_string(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpSwitchString>();
    size_t tableIndex = bytecode.m_tableIndex;
    unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset);
    unsigned scrutinee = bytecode.m_scrutinee.offset();

    // create jump table for switch destinations, track this switch statement.
    StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));

    emitGetVirtualRegister(scrutinee, regT0);
    callOperation(operationSwitchStringWithUnknownKeyType, regT0, tableIndex);
    jump(returnValueGPR, JSSwitchPtrTag);
}
779
// op_debug: fast-skip when no debugger requests are pending; otherwise call
// into the debugger hook.
void JIT::emit_op_debug(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpDebug>();
    load32(codeBlock()->debuggerRequestsAddress(), regT0);
    Jump noDebuggerRequests = branchTest32(Zero, regT0);
    callOperation(operationDebug, static_cast<int>(bytecode.m_debugHookType));
    noDebuggerRequests.link(this);
}
788
// op_eq_null: dst = (operand == null) with loose-equality semantics —
// true for null, undefined, and same-global MasqueradesAsUndefined objects.
void JIT::emit_op_eq_null(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpEqNull>();
    int dst = bytecode.m_dst.offset();
    int src1 = bytecode.m_operand.offset();

    emitGetVirtualRegister(src1, regT0);
    Jump isImmediate = branchIfNotCell(regT0);

    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
    // Ordinary cell: never equal to null.
    move(TrustedImm32(0), regT0);
    Jump wasNotMasqueradesAsUndefined = jump();

    // Masquerader: equal to null only when its structure's global object is
    // this code block's global object.
    isMasqueradesAsUndefined.link(this);
    emitLoadStructure(*vm(), regT0, regT2, regT1);
    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
    loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
    comparePtr(Equal, regT0, regT2, regT0);
    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    // Masking TagBitUndefined folds undefined onto null for a single compare.
    and64(TrustedImm32(~TagBitUndefined), regT0);
    compare64(Equal, regT0, TrustedImm32(ValueNull), regT0);

    wasNotImmediate.link(this);
    wasNotMasqueradesAsUndefined.link(this);

    boxBoolean(regT0, JSValueRegs { regT0 });
    emitPutVirtualRegister(dst);

}
821
// op_neq_null: dst = (operand != null); mirror image of emit_op_eq_null.
void JIT::emit_op_neq_null(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpNeqNull>();
    int dst = bytecode.m_dst.offset();
    int src1 = bytecode.m_operand.offset();

    emitGetVirtualRegister(src1, regT0);
    Jump isImmediate = branchIfNotCell(regT0);

    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
    // Ordinary cell: always not-null.
    move(TrustedImm32(1), regT0);
    Jump wasNotMasqueradesAsUndefined = jump();

    // Masquerader: not-null exactly when its global object differs from ours.
    isMasqueradesAsUndefined.link(this);
    emitLoadStructure(*vm(), regT0, regT2, regT1);
    move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
    loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
    comparePtr(NotEqual, regT0, regT2, regT0);
    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    and64(TrustedImm32(~TagBitUndefined), regT0);
    compare64(NotEqual, regT0, TrustedImm32(ValueNull), regT0);

    wasNotImmediate.link(this);
    wasNotMasqueradesAsUndefined.link(this);

    boxBoolean(regT0, JSValueRegs { regT0 });
    emitPutVirtualRegister(dst);
}
853
854void JIT::emit_op_enter(const Instruction*)
855{
856 // Even though CTI doesn't use them, we initialize our constant
857 // registers to zap stale pointers, to avoid unnecessarily prolonging
858 // object lifetime and increasing GC pressure.
859 size_t count = m_codeBlock->numVars();
860 for (size_t j = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); j < count; ++j)
861 emitInitRegister(virtualRegisterForLocal(j).offset());
862
863 emitWriteBarrier(m_codeBlock);
864
865 emitEnterOptimizationCheck();
866}
867
868void JIT::emit_op_get_scope(const Instruction* currentInstruction)
869{
870 auto bytecode = currentInstruction->as<OpGetScope>();
871 int dst = bytecode.m_dst.offset();
872 emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, regT0);
873 loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0);
874 emitStoreCell(dst, regT0);
875}
876
877void JIT::emit_op_to_this(const Instruction* currentInstruction)
878{
879 auto bytecode = currentInstruction->as<OpToThis>();
880 auto& metadata = bytecode.metadata(m_codeBlock);
881 StructureID* cachedStructureID = &metadata.m_cachedStructureID;
882 emitGetVirtualRegister(bytecode.m_srcDst.offset(), regT1);
883
884 emitJumpSlowCaseIfNotJSCell(regT1);
885
886 addSlowCase(branchIfNotType(regT1, FinalObjectType));
887 load32(cachedStructureID, regT2);
888 addSlowCase(branch32(NotEqual, Address(regT1, JSCell::structureIDOffset()), regT2));
889}
890
// Fast-path allocation of |this| for a constructor call: if the callee is a
// JSFunction that matches the cached callee and carries an object allocation
// profile, allocate the new object inline; otherwise take the slow path.
void JIT::emit_op_create_this(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpCreateThis>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int callee = bytecode.m_callee.offset();
    WriteBarrierBase<JSCell>* cachedFunction = &metadata.m_cachedCallee;
    // Note the register reuse: rareDataReg and cachedFunctionReg share regT4
    // (their live ranges do not overlap), and resultReg aliases calleeReg.
    RegisterID calleeReg = regT0;
    RegisterID rareDataReg = regT4;
    RegisterID resultReg = regT0;
    RegisterID allocatorReg = regT1;
    RegisterID structureReg = regT2;
    RegisterID cachedFunctionReg = regT4;
    RegisterID scratchReg = regT3;

    emitGetVirtualRegister(callee, calleeReg);
    addSlowCase(branchIfNotFunction(calleeReg));
    // Functions without rare data have no allocation profile yet.
    loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg);
    addSlowCase(branchTestPtr(Zero, rareDataReg));
    loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfileWithPrototype::offsetOfAllocator()), allocatorReg);
    loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfileWithPrototype::offsetOfStructure()), structureReg);

    // Guard on the cached callee unless the site has already seen multiple callees.
    loadPtr(cachedFunction, cachedFunctionReg);
    Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
    addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg));
    hasSeenMultipleCallees.link(this);

    JumpList slowCases;
    // New final objects start with no butterfly; inline storage is zapped below.
    auto butterfly = TrustedImmPtr(nullptr);
    emitAllocateJSObject(resultReg, JITAllocator::variable(), allocatorReg, structureReg, butterfly, scratchReg, slowCases);
    load8(Address(structureReg, Structure::inlineCapacityOffset()), scratchReg);
    emitInitializeInlineStorage(resultReg, scratchReg);
    addSlowCase(slowCases);
    emitPutVirtualRegister(bytecode.m_dst.offset());
}
925
926void JIT::emit_op_check_tdz(const Instruction* currentInstruction)
927{
928 auto bytecode = currentInstruction->as<OpCheckTdz>();
929 emitGetVirtualRegister(bytecode.m_targetVirtualRegister.offset(), regT0);
930 addSlowCase(branchIfEmpty(regT0));
931}
932
933
934// Slow cases
935
936void JIT::emitSlow_op_eq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
937{
938 linkAllSlowCases(iter);
939
940 auto bytecode = currentInstruction->as<OpEq>();
941 callOperation(operationCompareEq, regT0, regT1);
942 boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR });
943 emitPutVirtualRegister(bytecode.m_dst.offset(), returnValueGPR);
944}
945
946void JIT::emitSlow_op_neq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
947{
948 linkAllSlowCases(iter);
949
950 auto bytecode = currentInstruction->as<OpNeq>();
951 callOperation(operationCompareEq, regT0, regT1);
952 xor32(TrustedImm32(0x1), regT0);
953 boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR });
954 emitPutVirtualRegister(bytecode.m_dst.offset(), returnValueGPR);
955}
956
957void JIT::emitSlow_op_jeq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
958{
959 linkAllSlowCases(iter);
960
961 auto bytecode = currentInstruction->as<OpJeq>();
962 unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
963 callOperation(operationCompareEq, regT0, regT1);
964 emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target);
965}
966
967void JIT::emitSlow_op_jneq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
968{
969 linkAllSlowCases(iter);
970
971 auto bytecode = currentInstruction->as<OpJneq>();
972 unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel);
973 callOperation(operationCompareEq, regT0, regT1);
974 emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target);
975}
976
977void JIT::emitSlow_op_instanceof_custom(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
978{
979 linkAllSlowCases(iter);
980
981 auto bytecode = currentInstruction->as<OpInstanceofCustom>();
982 int dst = bytecode.m_dst.offset();
983 int value = bytecode.m_value.offset();
984 int constructor = bytecode.m_constructor.offset();
985 int hasInstanceValue = bytecode.m_hasInstanceValue.offset();
986
987 emitGetVirtualRegister(value, regT0);
988 emitGetVirtualRegister(constructor, regT1);
989 emitGetVirtualRegister(hasInstanceValue, regT2);
990 callOperation(operationInstanceOfCustom, regT0, regT1, regT2);
991 boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR });
992 emitPutVirtualRegister(dst, returnValueGPR);
993}
994
995#endif // USE(JSVALUE64)
996
997void JIT::emit_op_loop_hint(const Instruction*)
998{
999 // Emit the JIT optimization check:
1000 if (canBeOptimized()) {
1001 addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
1002 AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
1003 }
1004}
1005
// Slow path for op_loop_hint: asks the DFG for an optimized entry point and,
// if one is available, performs an OSR entry into it.
void JIT::emitSlow_op_loop_hint(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if ENABLE(DFG_JIT)
    // Emit the slow path for the JIT optimization check:
    if (canBeOptimized()) {
        linkAllSlowCases(iter);

        // OSR entry expects callee saves to be staged in the entry frame buffer.
        copyCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);

        // Returns the optimized entry address, or null to stay in baseline code.
        callOperation(operationOptimize, m_bytecodeOffset);
        Jump noOptimizedEntry = branchTestPtr(Zero, returnValueGPR);
        if (!ASSERT_DISABLED) {
            // Sanity check: a real code address can never be a tiny integer.
            Jump ok = branchPtr(MacroAssembler::Above, returnValueGPR, TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000))));
            abortWithReason(JITUnreasonableLoopHintJumpTarget);
            ok.link(this);
        }
        // Tail-jump into the optimized code.
        jump(returnValueGPR, GPRInfo::callFrameRegister);
        noOptimizedEntry.link(this);

        emitJumpSlowToHot(jump(), currentInstruction->size());
    }
#else
    UNUSED_PARAM(currentInstruction);
    UNUSED_PARAM(iter);
#endif
}
1032
// Takes the slow path when the VM has pending traps to service
// (e.g. termination requests or debugger interrupts).
void JIT::emit_op_check_traps(const Instruction*)
{
    addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->needTrapHandlingAddress())));
}
1037
// op_nop intentionally emits no code.
void JIT::emit_op_nop(const Instruction*)
{
}
1041
// Increments the global super-sampler count to mark entry into a sampled region.
void JIT::emit_op_super_sampler_begin(const Instruction*)
{
    add32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<void*>(&g_superSamplerCount)));
}
1046
// Decrements the global super-sampler count, balancing op_super_sampler_begin.
void JIT::emit_op_super_sampler_end(const Instruction*)
{
    sub32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<void*>(&g_superSamplerCount)));
}
1051
// Slow path for op_check_traps: calls out to the runtime to service pending traps.
void JIT::emitSlow_op_check_traps(const Instruction*, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    callOperation(operationHandleTraps);
}
1058
1059void JIT::emit_op_new_regexp(const Instruction* currentInstruction)
1060{
1061 auto bytecode = currentInstruction->as<OpNewRegexp>();
1062 int dst = bytecode.m_dst.offset();
1063 int regexp = bytecode.m_regexp.offset();
1064 callOperation(operationNewRegexp, jsCast<RegExp*>(m_codeBlock->getConstant(regexp)));
1065 emitStoreCell(dst, returnValueGPR);
1066}
1067
1068template<typename Op>
1069void JIT::emitNewFuncCommon(const Instruction* currentInstruction)
1070{
1071 Jump lazyJump;
1072 auto bytecode = currentInstruction->as<Op>();
1073 int dst = bytecode.m_dst.offset();
1074
1075#if USE(JSVALUE64)
1076 emitGetVirtualRegister(bytecode.m_scope.offset(), regT0);
1077#else
1078 emitLoadPayload(bytecode.m_scope.offset(), regT0);
1079#endif
1080 FunctionExecutable* funcExec = m_codeBlock->functionDecl(bytecode.m_functionDecl);
1081
1082 OpcodeID opcodeID = Op::opcodeID;
1083 if (opcodeID == op_new_func)
1084 callOperation(operationNewFunction, dst, regT0, funcExec);
1085 else if (opcodeID == op_new_generator_func)
1086 callOperation(operationNewGeneratorFunction, dst, regT0, funcExec);
1087 else if (opcodeID == op_new_async_func)
1088 callOperation(operationNewAsyncFunction, dst, regT0, funcExec);
1089 else {
1090 ASSERT(opcodeID == op_new_async_generator_func);
1091 callOperation(operationNewAsyncGeneratorFunction, dst, regT0, funcExec);
1092 }
1093}
1094
// Thin dispatch into the shared new-function emitter.
void JIT::emit_op_new_func(const Instruction* currentInstruction)
{
    emitNewFuncCommon<OpNewFunc>(currentInstruction);
}
1099
// Thin dispatch into the shared new-function emitter (generator variant).
void JIT::emit_op_new_generator_func(const Instruction* currentInstruction)
{
    emitNewFuncCommon<OpNewGeneratorFunc>(currentInstruction);
}
1104
// Thin dispatch into the shared new-function emitter (async-generator variant).
void JIT::emit_op_new_async_generator_func(const Instruction* currentInstruction)
{
    emitNewFuncCommon<OpNewAsyncGeneratorFunc>(currentInstruction);
}
1109
// Thin dispatch into the shared new-function emitter (async variant).
void JIT::emit_op_new_async_func(const Instruction* currentInstruction)
{
    emitNewFuncCommon<OpNewAsyncFunc>(currentInstruction);
}
1114
1115template<typename Op>
1116void JIT::emitNewFuncExprCommon(const Instruction* currentInstruction)
1117{
1118 auto bytecode = currentInstruction->as<Op>();
1119 int dst = bytecode.m_dst.offset();
1120#if USE(JSVALUE64)
1121 emitGetVirtualRegister(bytecode.m_scope.offset(), regT0);
1122#else
1123 emitLoadPayload(bytecode.m_scope.offset(), regT0);
1124#endif
1125
1126 FunctionExecutable* function = m_codeBlock->functionExpr(bytecode.m_functionDecl);
1127 OpcodeID opcodeID = Op::opcodeID;
1128
1129 if (opcodeID == op_new_func_exp)
1130 callOperation(operationNewFunction, dst, regT0, function);
1131 else if (opcodeID == op_new_generator_func_exp)
1132 callOperation(operationNewGeneratorFunction, dst, regT0, function);
1133 else if (opcodeID == op_new_async_func_exp)
1134 callOperation(operationNewAsyncFunction, dst, regT0, function);
1135 else {
1136 ASSERT(opcodeID == op_new_async_generator_func_exp);
1137 callOperation(operationNewAsyncGeneratorFunction, dst, regT0, function);
1138 }
1139}
1140
// Thin dispatch into the shared function-expression emitter.
void JIT::emit_op_new_func_exp(const Instruction* currentInstruction)
{
    emitNewFuncExprCommon<OpNewFuncExp>(currentInstruction);
}
1145
// Thin dispatch into the shared function-expression emitter (generator variant).
void JIT::emit_op_new_generator_func_exp(const Instruction* currentInstruction)
{
    emitNewFuncExprCommon<OpNewGeneratorFuncExp>(currentInstruction);
}
1150
// Thin dispatch into the shared function-expression emitter (async variant).
void JIT::emit_op_new_async_func_exp(const Instruction* currentInstruction)
{
    emitNewFuncExprCommon<OpNewAsyncFuncExp>(currentInstruction);
}
1155
// Thin dispatch into the shared function-expression emitter (async-generator variant).
void JIT::emit_op_new_async_generator_func_exp(const Instruction* currentInstruction)
{
    emitNewFuncExprCommon<OpNewAsyncGeneratorFuncExp>(currentInstruction);
}
1160
1161void JIT::emit_op_new_array(const Instruction* currentInstruction)
1162{
1163 auto bytecode = currentInstruction->as<OpNewArray>();
1164 auto& metadata = bytecode.metadata(m_codeBlock);
1165 int dst = bytecode.m_dst.offset();
1166 int valuesIndex = bytecode.m_argv.offset();
1167 int size = bytecode.m_argc;
1168 addPtr(TrustedImm32(valuesIndex * sizeof(Register)), callFrameRegister, regT0);
1169 callOperation(operationNewArrayWithProfile, dst,
1170 &metadata.m_arrayAllocationProfile, regT0, size);
1171}
1172
// Allocates a new array of a runtime-computed length via the runtime,
// recording the result in the allocation profile.
void JIT::emit_op_new_array_with_size(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpNewArrayWithSize>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int dst = bytecode.m_dst.offset();
    int sizeIndex = bytecode.m_length.offset();
#if USE(JSVALUE64)
    // 64-bit: the length JSValue fits in one register.
    emitGetVirtualRegister(sizeIndex, regT0);
    callOperation(operationNewArrayWithSizeAndProfile, dst,
        &metadata.m_arrayAllocationProfile, regT0);
#else
    // 32-bit: the length is a tag/payload pair.
    emitLoad(sizeIndex, regT1, regT0);
    callOperation(operationNewArrayWithSizeAndProfile, dst,
        &metadata.m_arrayAllocationProfile, JSValueRegs(regT1, regT0));
#endif
}
1189
1190#if USE(JSVALUE64)
1191void JIT::emit_op_has_structure_property(const Instruction* currentInstruction)
1192{
1193 auto bytecode = currentInstruction->as<OpHasStructureProperty>();
1194 int dst = bytecode.m_dst.offset();
1195 int base = bytecode.m_base.offset();
1196 int enumerator = bytecode.m_enumerator.offset();
1197
1198 emitGetVirtualRegister(base, regT0);
1199 emitGetVirtualRegister(enumerator, regT1);
1200 emitJumpSlowCaseIfNotJSCell(regT0, base);
1201
1202 load32(Address(regT0, JSCell::structureIDOffset()), regT0);
1203 addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
1204
1205 move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
1206 emitPutVirtualRegister(dst);
1207}
1208
// Compiles a specialized has_indexed_property stub for the array shape seen at
// runtime, then patches the original code to jump through it.
void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
    const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr();

    PatchableJump badType;

    // FIXME: Add support for other types like TypedArrays and Arguments.
    // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
    JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType);
    // If the load succeeded, the property exists.
    move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
    Jump done = jump();

    LinkBuffer patchBuffer(*this, m_codeBlock);

    // Shape mismatches and out-of-bounds loads fall back to the generic slow path.
    patchBuffer.link(badType, byValInfo->slowPathTarget);
    patchBuffer.link(slowCases, byValInfo->slowPathTarget);

    patchBuffer.link(done, byValInfo->badTypeDoneTarget);

    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        m_codeBlock, patchBuffer, JITStubRoutinePtrTag,
        "Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value());

    // Redirect the original type check into the stub, and retarget the slow-path
    // call at the generic operation so we do not recompile this stub again.
    MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel<JITStubRoutinePtrTag>(byValInfo->stubRoutine->code().code()));
    MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(operationHasIndexedPropertyGeneric));
}
1235
// Fast path for `index in array`-style checks: profiles the base's indexing
// shape and emits a patchable load for the chosen array mode. The recorded
// ByValCompilationInfo lets privateCompileHasIndexedProperty patch in a
// specialized stub later.
void JIT::emit_op_has_indexed_property(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpHasIndexedProperty>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int dst = bytecode.m_dst.offset();
    int base = bytecode.m_base.offset();
    int property = bytecode.m_property.offset();
    ArrayProfile* profile = &metadata.m_arrayProfile;
    ByValInfo* byValInfo = m_codeBlock->addByValInfo();

    emitGetVirtualRegisters(base, regT0, property, regT1);

    emitJumpSlowCaseIfNotInt(regT1);

    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
    // number was signed since m_vectorLength is always less than intmax (since the total allocation
    // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
    // to 64-bits is necessary since it's used in the address calculation. We zero extend rather than sign
    // extending since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);

    emitJumpSlowCaseIfNotJSCell(regT0, base);
    // Record the base's indexing shape in the array profile for future tiers.
    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
    and32(TrustedImm32(IndexingShapeMask), regT2);

    JITArrayMode mode = chooseArrayMode(profile);
    PatchableJump badType;

    // FIXME: Add support for other types like TypedArrays and Arguments.
    // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
    JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType);

    // A successful load means the index is present.
    move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

    emitPutVirtualRegister(dst);

    Label nextHotPath = label();

    // Remember the patch points so a specialized stub can be linked in later.
    m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, nextHotPath));
}
1282
1283void JIT::emitSlow_op_has_indexed_property(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1284{
1285 linkAllSlowCases(iter);
1286
1287 auto bytecode = currentInstruction->as<OpHasIndexedProperty>();
1288 int dst = bytecode.m_dst.offset();
1289 int base = bytecode.m_base.offset();
1290 int property = bytecode.m_property.offset();
1291 ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
1292
1293 Label slowPath = label();
1294
1295 emitGetVirtualRegister(base, regT0);
1296 emitGetVirtualRegister(property, regT1);
1297 Call call = callOperation(operationHasIndexedPropertyDefault, dst, regT0, regT1, byValInfo);
1298
1299 m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
1300 m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
1301 m_byValInstructionIndex++;
1302}
1303
// Loads a property directly by its enumeration index, valid only while the
// base's structure matches the one cached by the property-name enumerator.
// Properties below the inline capacity live in inline storage; the rest live
// out-of-line in the butterfly.
void JIT::emit_op_get_direct_pname(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpGetDirectPname>();
    int dst = bytecode.m_dst.offset();
    int base = bytecode.m_base.offset();
    int index = bytecode.m_index.offset();
    int enumerator = bytecode.m_enumerator.offset();

    // Check that base is a cell
    emitGetVirtualRegister(base, regT0);
    emitJumpSlowCaseIfNotJSCell(regT0, base);

    // Check the structure
    emitGetVirtualRegister(enumerator, regT2);
    load32(Address(regT0, JSCell::structureIDOffset()), regT1);
    addSlowCase(branch32(NotEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedStructureIDOffset())));

    // Compute the offset
    emitGetVirtualRegister(index, regT1);
    // If index is less than the enumerator's cached inline storage, then it's an inline access
    Jump outOfLineAccess = branch32(AboveOrEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
    addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0);
    signExtend32ToPtr(regT1, regT1);
    load64(BaseIndex(regT0, regT1, TimesEight), regT0);

    Jump done = jump();

    // Otherwise it's out of line
    outOfLineAccess.link(this);
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
    // Convert the enumeration index into a (negative) slot offset from the
    // butterfly: out-of-line slots are laid out at descending addresses.
    sub32(Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT1);
    neg32(regT1);
    signExtend32ToPtr(regT1, regT1);
    int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
    load64(BaseIndex(regT0, regT1, TimesEight, offsetOfFirstProperty), regT0);

    done.link(this);
    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    emitPutVirtualRegister(dst, regT0);
}
1344
1345void JIT::emit_op_enumerator_structure_pname(const Instruction* currentInstruction)
1346{
1347 auto bytecode = currentInstruction->as<OpEnumeratorStructurePname>();
1348 int dst = bytecode.m_dst.offset();
1349 int enumerator = bytecode.m_enumerator.offset();
1350 int index = bytecode.m_index.offset();
1351
1352 emitGetVirtualRegister(index, regT0);
1353 emitGetVirtualRegister(enumerator, regT1);
1354 Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()));
1355
1356 move(TrustedImm64(JSValue::encode(jsNull())), regT0);
1357
1358 Jump done = jump();
1359 inBounds.link(this);
1360
1361 loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
1362 signExtend32ToPtr(regT0, regT0);
1363 load64(BaseIndex(regT1, regT0, TimesEight), regT0);
1364
1365 done.link(this);
1366 emitPutVirtualRegister(dst);
1367}
1368
1369void JIT::emit_op_enumerator_generic_pname(const Instruction* currentInstruction)
1370{
1371 auto bytecode = currentInstruction->as<OpEnumeratorGenericPname>();
1372 int dst = bytecode.m_dst.offset();
1373 int enumerator = bytecode.m_enumerator.offset();
1374 int index = bytecode.m_index.offset();
1375
1376 emitGetVirtualRegister(index, regT0);
1377 emitGetVirtualRegister(enumerator, regT1);
1378 Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
1379
1380 move(TrustedImm64(JSValue::encode(jsNull())), regT0);
1381
1382 Jump done = jump();
1383 inBounds.link(this);
1384
1385 loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
1386 signExtend32ToPtr(regT0, regT0);
1387 load64(BaseIndex(regT1, regT0, TimesEight), regT0);
1388
1389 done.link(this);
1390 emitPutVirtualRegister(dst);
1391}
1392
// Records the observed type of a value into the type profiler's log, skipping
// the write when an inline predictive check shows the type has not changed.
void JIT::emit_op_profile_type(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpProfileType>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    TypeLocation* cachedTypeLocation = metadata.m_typeLocation;
    int valueToProfile = bytecode.m_targetVirtualRegister.offset();

    emitGetVirtualRegister(valueToProfile, regT0);

    JumpList jumpToEnd;

    // Empty values are never profiled.
    jumpToEnd.append(branchIfEmpty(regT0));

    // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
    // These typechecks are inlined to match those of the 64-bit JSValue type checks.
    if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
        jumpToEnd.append(branchIfUndefined(regT0));
    else if (cachedTypeLocation->m_lastSeenType == TypeNull)
        jumpToEnd.append(branchIfNull(regT0));
    else if (cachedTypeLocation->m_lastSeenType == TypeBoolean)
        jumpToEnd.append(branchIfBoolean(regT0, regT1));
    else if (cachedTypeLocation->m_lastSeenType == TypeAnyInt)
        jumpToEnd.append(branchIfInt32(regT0));
    else if (cachedTypeLocation->m_lastSeenType == TypeNumber)
        jumpToEnd.append(branchIfNumber(regT0));
    else if (cachedTypeLocation->m_lastSeenType == TypeString) {
        Jump isNotCell = branchIfNotCell(regT0);
        jumpToEnd.append(branchIfString(regT0));
        isNotCell.link(this);
    }

    // Load the type profiling log into T2.
    TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog();
    move(TrustedImmPtr(cachedTypeProfilerLog), regT2);
    // Load the next log entry into T1.
    loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1);

    // Store the JSValue onto the log entry.
    store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset()));

    // Store the structureID of the cell if T0 is a cell, otherwise, store 0 on the log entry.
    Jump notCell = branchIfNotCell(regT0);
    load32(Address(regT0, JSCell::structureIDOffset()), regT0);
    store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
    Jump skipIsCell = jump();
    notCell.link(this);
    store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
    skipIsCell.link(this);

    // Store the typeLocation on the log entry.
    move(TrustedImmPtr(cachedTypeLocation), regT0);
    store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset()));

    // Increment the current log entry.
    addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1);
    store64(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset()));
    Jump skipClearLog = branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr()));
    // Clear the log if we're at the end of the log.
    callOperation(operationProcessTypeProfilerLog);
    skipClearLog.link(this);

    jumpToEnd.link(this);
}
1456
// Logs a prologue packet into the shadow chicken buffer so the shadow stack
// can reconstruct tail-deleted frames for the debugger/profiler.
void JIT::emit_op_log_shadow_chicken_prologue(const Instruction* currentInstruction)
{
    RELEASE_ASSERT(vm()->shadowChicken());
    updateTopCallFrame();
    static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if this is true.");
    auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>();
    GPRReg shadowPacketReg = regT0;
    GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
    GPRReg scratch2Reg = regT2;
    // May call into the runtime to flush the buffer, hence the register constraints.
    ensureShadowChickenPacket(*vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
    emitGetVirtualRegister(bytecode.m_scope.offset(), regT3);
    logShadowChickenProloguePacket(shadowPacketReg, scratch1Reg, regT3);
}
1470
// Logs a tail packet (this value, scope, and call site) into the shadow
// chicken buffer just before a tail call replaces the current frame.
void JIT::emit_op_log_shadow_chicken_tail(const Instruction* currentInstruction)
{
    RELEASE_ASSERT(vm()->shadowChicken());
    updateTopCallFrame();
    static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if this is true.");
    auto bytecode = currentInstruction->as<OpLogShadowChickenTail>();
    GPRReg shadowPacketReg = regT0;
    GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
    GPRReg scratch2Reg = regT2;
    // May call into the runtime to flush the buffer, hence the register constraints.
    ensureShadowChickenPacket(*vm(), shadowPacketReg, scratch1Reg, scratch2Reg);
    emitGetVirtualRegister(bytecode.m_thisValue.offset(), regT2);
    emitGetVirtualRegister(bytecode.m_scope.offset(), regT3);
    logShadowChickenTailPacket(shadowPacketReg, JSValueRegs(regT2), regT3, m_codeBlock, CallSiteIndex(m_bytecodeOffset));
}
1485
1486#endif // USE(JSVALUE64)
1487
// Marks this basic block as executed for the control-flow profiler.
void JIT::emit_op_profile_control_flow(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpProfileControlFlow>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    BasicBlockLocation* basicBlockLocation = metadata.m_basicBlockLocation;
#if USE(JSVALUE64)
    basicBlockLocation->emitExecuteCode(*this);
#else
    // The 32-bit variant needs an explicit scratch register.
    basicBlockLocation->emitExecuteCode(*this, regT0);
#endif
}
1499
1500void JIT::emit_op_argument_count(const Instruction* currentInstruction)
1501{
1502 auto bytecode = currentInstruction->as<OpArgumentCount>();
1503 int dst = bytecode.m_dst.offset();
1504 load32(payloadFor(CallFrameSlot::argumentCount), regT0);
1505 sub32(TrustedImm32(1), regT0);
1506 JSValueRegs result = JSValueRegs::withTwoAvailableRegs(regT0, regT1);
1507 boxInt32(regT0, result);
1508 emitPutVirtualRegister(dst, result);
1509}
1510
// Computes the length of a rest parameter: max(argumentCount - 1 -
// numParametersToSkip, 0), boxed as an int32.
void JIT::emit_op_get_rest_length(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpGetRestLength>();
    int dst = bytecode.m_dst.offset();
    unsigned numParamsToSkip = bytecode.m_numParametersToSkip;
    // argumentCount includes |this|; subtract it first.
    load32(payloadFor(CallFrameSlot::argumentCount), regT0);
    sub32(TrustedImm32(1), regT0);
    Jump zeroLength = branch32(LessThanOrEqual, regT0, Imm32(numParamsToSkip));
    sub32(Imm32(numParamsToSkip), regT0);
#if USE(JSVALUE64)
    boxInt32(regT0, JSValueRegs(regT0));
#endif
    Jump done = jump();

    // Fewer arguments than skipped parameters: the rest array is empty.
    zeroLength.link(this);
#if USE(JSVALUE64)
    move(TrustedImm64(JSValue::encode(jsNumber(0))), regT0);
#else
    move(TrustedImm32(0), regT0);
#endif

    done.link(this);
#if USE(JSVALUE64)
    emitPutVirtualRegister(dst, regT0);
#else
    // 32-bit stores the tag/payload pair explicitly.
    move(TrustedImm32(JSValue::Int32Tag), regT1);
    emitPutVirtualRegister(dst, JSValueRegs(regT1, regT0));
#endif
}
1540
// Loads the argument at a fixed index, yielding undefined when the caller
// passed fewer arguments, and profiles the resulting value.
void JIT::emit_op_get_argument(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpGetArgument>();
    int dst = bytecode.m_dst.offset();
    int index = bytecode.m_index;
#if USE(JSVALUE64)
    JSValueRegs resultRegs(regT0);
#else
    JSValueRegs resultRegs(regT1, regT0);
#endif

    // argumentCount includes |this|, so index is compared directly against it.
    load32(payloadFor(CallFrameSlot::argumentCount), regT2);
    Jump argumentOutOfBounds = branch32(LessThanOrEqual, regT2, TrustedImm32(index));
    loadValue(addressFor(CallFrameSlot::thisArgument + index), resultRegs);
    Jump done = jump();

    argumentOutOfBounds.link(this);
    moveValue(jsUndefined(), resultRegs);

    done.link(this);
    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    emitPutVirtualRegister(dst, resultRegs);
}
1564
1565} // namespace JSC
1566
1567#endif // ENABLE(JIT)
1568