/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(JIT)
#include "JSCInlines.h"

namespace JSC {

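// Dispatches to the per-shape fast-path load emitter for the profiled array mode.
// The called emitter is responsible for filling in badType with the patchable jump
// taken when the indexing-type check fails.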
ALWAYS_INLINE MacroAssembler::JumpList JIT::emitLoadForArrayMode(const Instruction* currentInstruction, JITArrayMode arrayMode, PatchableJump& badType)
{
    switch (arrayMode) {
    case JITInt32:
        return emitInt32Load(currentInstruction, badType);
    case JITDouble:
        return emitDoubleLoad(currentInstruction, badType);
    case JITContiguous:
        return emitContiguousLoad(currentInstruction, badType);
    case JITArrayStorage:
        return emitArrayStorageLoad(currentInstruction, badType);
    default:
        break;
    }
    RELEASE_ASSERT_NOT_REACHED();
    return MacroAssembler::JumpList();
}

ALWAYS_INLINE bool JIT::isOperandConstantDouble(int src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(int src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}

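// Stores a raw machine int into a call frame header slot, adding the Int32 tag on 32-bit builds.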
ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, int entry)
{
#if USE(JSVALUE32_64)
    store32(TrustedImm32(JSValue::Int32Tag), tagFor(entry));
    store32(from, payloadFor(entry));
#else
    store64(from, addressFor(entry));
#endif
}

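// Loads the single character of a one-character string into dst. Appends to failures if src is
// not a string, is an unresolved rope, or does not have length 1. Clobbers regT1, which holds the
// pointer to the character data.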
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
    failures.append(branchIfNotString(src));
    loadPtr(MacroAssembler::Address(src, JSString::offsetOfValue()), dst);
    failures.append(branchIfRopeStringImpl(dst));
    failures.append(branch32(NotEqual, MacroAssembler::Address(dst, StringImpl::lengthMemoryOffset()), TrustedImm32(1)));
    loadPtr(MacroAssembler::Address(dst, StringImpl::dataOffset()), regT1);

    auto is16Bit = branchTest32(Zero, Address(dst, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
    load8(MacroAssembler::Address(regT1, 0), dst);
    auto done = jump();
    is16Bit.link(this);
    load16(MacroAssembler::Address(regT1, 0), dst);
    done.link(this);
}

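// Emits a near call and records it, together with its target, so the link phase can bind the call
// to the target's real address. The tail-call variant below does the same with a near tail call.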
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr<NoPtrTag> target)
{
    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, FunctionPtr<OperationPtrTag>(target.retagged<OperationPtrTag>())));
    return nakedCall;
}

ALWAYS_INLINE JIT::Call JIT::emitNakedTailCall(CodePtr<NoPtrTag> target)
{
    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
    Call nakedCall = nearTailCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, FunctionPtr<OperationPtrTag>(target.retagged<OperationPtrTag>())));
    return nakedCall;
}

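// Publishes the current call site: writes the call site index into the current frame's argument
// count tag slot (32-bit builds encode the instruction pointer rather than the bytecode index)
// and stores the call frame pointer into vm.topCallFrame.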
ALWAYS_INLINE void JIT::updateTopCallFrame()
{
#if USE(JSVALUE32_64)
    const Instruction* instruction = m_codeBlock->instructions().at(m_bytecodeIndex.offset()).ptr();
    uint32_t locationBits = CallSiteIndex(BytecodeIndex(bitwise_cast<uint32_t>(instruction))).bits();
#else
    uint32_t locationBits = CallSiteIndex(m_bytecodeIndex).bits();
#endif
    store32(TrustedImm32(locationBits), tagFor(CallFrameSlot::argumentCount));

    // FIXME: It's not clear that this is needed. JITOperations tend to update the top call frame on
    // the C++ side.
    // https://bugs.webkit.org/show_bug.cgi?id=155693
    storePtr(callFrameRegister, &m_vm->topCallFrame);
}

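// The appendCall* helpers below call out to a C++ operation: they publish the top call frame
// first, emit the call, and then emit a check for a pending VM exception (or a call frame
// rollback, for the WithCallFrameRollbackOnException variant).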
ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheck(const FunctionPtr<CFunctionPtrTag> function)
{
    updateTopCallFrame();
    MacroAssembler::Call call = appendCall(function);
    exceptionCheck();
    return call;
}

#if OS(WINDOWS) && CPU(X86_64)
ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckAndSlowPathReturnType(const FunctionPtr<CFunctionPtrTag> function)
{
    updateTopCallFrame();
    MacroAssembler::Call call = appendCallWithSlowPathReturnType(function);
    exceptionCheck();
    return call;
}
#endif

ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithCallFrameRollbackOnException(const FunctionPtr<CFunctionPtrTag> function)
{
    updateTopCallFrame(); // The callee is responsible for setting topCallFrame to their caller
    MacroAssembler::Call call = appendCall(function);
    exceptionCheckWithCallFrameRollback();
    return call;
}

ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckSetJSValueResult(const FunctionPtr<CFunctionPtrTag> function, int dst)
{
    MacroAssembler::Call call = appendCallWithExceptionCheck(function);
#if USE(JSVALUE64)
    emitPutVirtualRegister(dst, returnValueGPR);
#else
    emitStore(dst, returnValueGPR2, returnValueGPR);
#endif
    return call;
}

template<typename Metadata>
ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckSetJSValueResultWithProfile(Metadata& metadata, const FunctionPtr<CFunctionPtrTag> function, int dst)
{
    MacroAssembler::Call call = appendCallWithExceptionCheck(function);
    emitValueProfilingSite(metadata);
#if USE(JSVALUE64)
    emitPutVirtualRegister(dst, returnValueGPR);
#else
    emitStore(dst, returnValueGPR2, returnValueGPR);
#endif
    return call;
}

ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}

ALWAYS_INLINE void JIT::linkAllSlowCasesForBytecodeIndex(Vector<SlowCaseEntry>& slowCases, Vector<SlowCaseEntry>::iterator& iter, BytecodeIndex bytecodeIndex)
{
    while (iter != slowCases.end() && iter->to == bytecodeIndex)
        linkSlowCase(iter);
}

ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
}

ALWAYS_INLINE void JIT::addSlowCase(const JumpList& jumpList)
{
    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    for (const Jump& jump : jumpList.jumps())
        m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
}

ALWAYS_INLINE void JIT::addSlowCase()
{
    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    Jump emptyJump; // Doing it this way to make Windows happy.
    m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeIndex));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeIndex.offset() + relativeOffset));
}

ALWAYS_INLINE void JIT::addJump(const JumpList& jumpList, int relativeOffset)
{
    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    for (auto& jump : jumpList.jumps())
        addJump(jump, relativeOffset);
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    jump.linkTo(m_labels[m_bytecodeIndex.offset() + relativeOffset], this);
}

#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}
#endif

#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, int32_t count)
{
    add64(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
}
#endif

#if ENABLE(OPCODE_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(const Instruction* instruction, bool inHostFunction)
{
    move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(const Instruction* instruction, bool inHostFunction)
{
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif

ALWAYS_INLINE bool JIT::isOperandConstantChar(int src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}

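// Records the current result in the profile's single bucket. The value is expected in regT0
// (payload in regT0 and tag in regT1 on 32-bit builds).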
inline void JIT::emitValueProfilingSite(ValueProfile& valueProfile)
{
    ASSERT(shouldEmitProfiling());

    const RegisterID value = regT0;
#if USE(JSVALUE32_64)
    const RegisterID valueTag = regT1;
#endif

    // We're in a simple configuration: only one bucket, so we can just do a direct
    // store.
#if USE(JSVALUE64)
    store64(value, valueProfile.m_buckets);
#else
    EncodedValueDescriptor* descriptor = bitwise_cast<EncodedValueDescriptor*>(valueProfile.m_buckets);
    store32(value, &descriptor->asBits.payload);
    store32(valueTag, &descriptor->asBits.tag);
#endif
}

template<typename Op>
inline std::enable_if_t<std::is_same<decltype(Op::Metadata::m_profile), ValueProfile>::value, void> JIT::emitValueProfilingSiteIfProfiledOpcode(Op bytecode)
{
    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
}

inline void JIT::emitValueProfilingSiteIfProfiledOpcode(...) { }

template<typename Metadata>
inline void JIT::emitValueProfilingSite(Metadata& metadata)
{
    if (!shouldEmitProfiling())
        return;
    emitValueProfilingSite(metadata.m_profile);
}

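// When profiling is enabled, remembers the cell's structure ID as the profile's last seen
// structure. In all cases, leaves the cell's indexing type byte in the indexingType register.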
inline void JIT::emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile* arrayProfile)
{
    if (shouldEmitProfiling()) {
        load32(MacroAssembler::Address(cell, JSCell::structureIDOffset()), indexingType);
        store32(indexingType, arrayProfile->addressOfLastSeenStructureID());
    }

    load8(Address(cell, JSCell::indexingTypeAndMiscOffset()), indexingType);
}

inline void JIT::emitArrayProfileStoreToHoleSpecialCase(ArrayProfile* arrayProfile)
{
    store8(TrustedImm32(1), arrayProfile->addressOfMayStoreToHole());
}

inline void JIT::emitArrayProfileOutOfBoundsSpecialCase(ArrayProfile* arrayProfile)
{
    store8(TrustedImm32(1), arrayProfile->addressOfOutOfBounds());
}

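// Picks the specialized array mode to compile against, based on the shapes the profile has
// observed so far: Double, then Int32, then ArrayStorage, with Contiguous as the fallback.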
inline JITArrayMode JIT::chooseArrayMode(ArrayProfile* profile)
{
    auto arrayProfileSaw = [] (ArrayModes arrayModes, IndexingType capability) {
        return arrayModesIncludeIgnoringTypedArrays(arrayModes, capability);
    };

    ConcurrentJSLocker locker(m_codeBlock->m_lock);
    profile->computeUpdatedPrediction(locker, m_codeBlock);
    ArrayModes arrayModes = profile->observedArrayModes(locker);
    if (arrayProfileSaw(arrayModes, DoubleShape))
        return JITDouble;
    if (arrayProfileSaw(arrayModes, Int32Shape))
        return JITInt32;
    if (arrayProfileSaw(arrayModes, ArrayStorageShape))
        return JITArrayStorage;
    return JITContiguous;
}

ALWAYS_INLINE int32_t JIT::getOperandConstantInt(int src)
{
    return getConstantOperand(src).asInt32();
}

ALWAYS_INLINE double JIT::getOperandConstantDouble(int src)
{
    return getConstantOperand(src).asDouble();
}

ALWAYS_INLINE void JIT::emitInitRegister(int dst)
{
    storeTrustedValue(jsUndefined(), addressFor(dst));
}

#if USE(JSVALUE32_64)

inline void JIT::emitLoadTag(int index, RegisterID tag)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).tag()), tag);
        return;
    }

    load32(tagFor(index), tag);
}

inline void JIT::emitLoadPayload(int index, RegisterID payload)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).payload()), payload);
        return;
    }

    load32(payloadFor(index), payload);
}

inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
{
    move(Imm32(v.payload()), payload);
    move(Imm32(v.tag()), tag);
}

ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, JSValueRegs dst)
{
    emitLoad(src, dst.tagGPR(), dst.payloadGPR());
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, JSValueRegs from)
{
    emitStore(dst, from.tagGPR(), from.payloadGPR());
}

inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
    RELEASE_ASSERT(tag != payload);

    if (base == callFrameRegister) {
        RELEASE_ASSERT(payload != base);
        emitLoadPayload(index, payload);
        emitLoadTag(index, tag);
        return;
    }

    VirtualRegister target { index };
    if (payload == base) { // avoid stomping base
        load32(tagFor(target, base), tag);
        load32(payloadFor(target, base), payload);
        return;
    }

    load32(payloadFor(target, base), payload);
    load32(tagFor(target, base), tag);
}

inline void JIT::emitLoad2(int index1, RegisterID tag1, RegisterID payload1, int index2, RegisterID tag2, RegisterID payload2)
{
    emitLoad(index2, tag2, payload2);
    emitLoad(index1, tag1, payload1);
}

inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(TrustedImmPtr(&inConstantPool), value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
    } else
        convertInt32ToDouble(payloadFor(index), value);
}

inline void JIT::emitStore(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
    VirtualRegister target { index };
    store32(payload, payloadFor(target, base));
    store32(tag, tagFor(target, base));
}

inline void JIT::emitStoreInt32(int index, RegisterID payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index));
}

inline void JIT::emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index));
}

inline void JIT::emitStoreCell(int index, RegisterID payload, bool indexIsCell)
{
    store32(payload, payloadFor(index));
    if (!indexIsCell)
        store32(TrustedImm32(JSValue::CellTag), tagFor(index));
}

inline void JIT::emitStoreBool(int index, RegisterID payload, bool indexIsBool)
{
    store32(payload, payloadFor(index));
    if (!indexIsBool)
        store32(TrustedImm32(JSValue::BooleanTag), tagFor(index));
}

inline void JIT::emitStoreDouble(int index, FPRegisterID value)
{
    storeDouble(value, addressFor(index));
}

inline void JIT::emitStore(int index, const JSValue constant, RegisterID base)
{
    VirtualRegister target { index };
    store32(Imm32(constant.payload()), payloadFor(target, base));
    store32(Imm32(constant.tag()), tagFor(target, base));
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
    }
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(branchIfNotCell(tag));
    }
}

ALWAYS_INLINE bool JIT::isOperandConstantInt(int src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE bool JIT::getOperandConstantInt(int op1, int op2, int& op, int32_t& constant)
{
    if (isOperandConstantInt(op1)) {
        constant = getConstantOperand(op1).asInt32();
        op = op2;
        return true;
    }

    if (isOperandConstantInt(op2)) {
        constant = getConstantOperand(op2).asInt32();
        op = op1;
        return true;
    }

    return false;
}

#else // USE(JSVALUE32_64)

// "Get" loads a virtual register (an argument or local slot in the call frame) into a hardware register.
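// Constant operands are materialized as immediates; number constants go through Imm64 rather than
// TrustedImm64, which keeps them eligible for immediate blinding.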
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.

    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        if (!value.isNumber())
            move(TrustedImm64(JSValue::encode(value)), dst);
        else
            move(Imm64(JSValue::encode(value)), dst);
        return;
    }

    load64(addressFor(src), dst);
}

ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, JSValueRegs dst)
{
    emitGetVirtualRegister(src, dst.payloadGPR());
}

ALWAYS_INLINE void JIT::emitGetVirtualRegister(VirtualRegister src, RegisterID dst)
{
    emitGetVirtualRegister(src.offset(), dst);
}

ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    emitGetVirtualRegister(src1, dst1);
    emitGetVirtualRegister(src2, dst2);
}

ALWAYS_INLINE void JIT::emitGetVirtualRegisters(VirtualRegister src1, RegisterID dst1, VirtualRegister src2, RegisterID dst2)
{
    emitGetVirtualRegisters(src1.offset(), dst1, src2.offset(), dst2);
}

ALWAYS_INLINE bool JIT::isOperandConstantInt(int src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, RegisterID from)
{
    store64(from, addressFor(dst));
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, JSValueRegs from)
{
    emitPutVirtualRegister(dst, from.payloadGPR());
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(VirtualRegister dst, RegisterID from)
{
    emitPutVirtualRegister(dst.offset(), from);
}

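// ORs the two values together so a single cell check covers both: the result only looks like a
// cell if neither operand has any of the non-cell tag bits set.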
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    or64(reg2, scratch);
    return branchIfCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(branchIfCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(branchIfNotCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}

inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(TrustedImmPtr(&inConstantPool), value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        ASSERT(isOperandConstantInt(index));
        convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
    } else
        convertInt32ToDouble(addressFor(index), value);
}

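// Under the JSVALUE64 encoding, boxed int32s are exactly the values at or above the number tag,
// so an unsigned Below comparison against numberTagRegister detects "not an int32".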
ALWAYS_INLINE JIT::PatchableJump JIT::emitPatchableJumpIfNotInt(RegisterID reg)
{
    return patchableBranch64(Below, reg, numberTagRegister);
}

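// ANDs the two values so the combined word carries a full int32 tag only if both operands are
// boxed int32s.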
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotInt(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    and64(reg2, scratch);
    return branchIfNotInt32(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotInt(RegisterID reg)
{
    addSlowCase(branchIfNotInt32(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotInt(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotInt(reg1, reg2, scratch));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotNumber(RegisterID reg)
{
    addSlowCase(branchIfNotNumber(reg));
}

#endif // USE(JSVALUE32_64)

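// An inline jump target of 0 means the real offset lives in the code block's out-of-line jump table.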
ALWAYS_INLINE int JIT::jumpTarget(const Instruction* instruction, int target)
{
    if (target)
        return target;
    return m_codeBlock->outOfLineJumpOffset(instruction);
}

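// Returns a cached copy of the bytecode's GetPutInfo, memoized per metadata ID (offset by one
// because the map rejects 0 as a key).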
ALWAYS_INLINE GetPutInfo JIT::copiedGetPutInfo(OpPutToScope bytecode)
{
    unsigned key = bytecode.m_metadataID + 1; // HashMap doesn't like 0 as a key
    auto iterator = m_copiedGetPutInfos.find(key);
    if (iterator != m_copiedGetPutInfos.end())
        return GetPutInfo(iterator->value);
    GetPutInfo getPutInfo = bytecode.metadata(m_codeBlock).m_getPutInfo;
    m_copiedGetPutInfos.add(key, getPutInfo.operand());
    return getPutInfo;
}

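// Same memoization for binary arith profiles, keyed on both the opcode ID and the metadata ID.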
template<typename BinaryOp>
ALWAYS_INLINE BinaryArithProfile JIT::copiedArithProfile(BinaryOp bytecode)
{
    uint64_t key = (static_cast<uint64_t>(BinaryOp::opcodeID) + 1) << 32 | static_cast<uint64_t>(bytecode.m_metadataID);
    auto iterator = m_copiedArithProfiles.find(key);
    if (iterator != m_copiedArithProfiles.end())
        return iterator->value;
    BinaryArithProfile arithProfile = bytecode.metadata(m_codeBlock).m_arithProfile;
    m_copiedArithProfiles.add(key, arithProfile);
    return arithProfile;
}

} // namespace JSC

#endif // ENABLE(JIT)
