/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(JIT)
#include "JSCInlines.h"

namespace JSC {

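// Performs the double-typed load for get_by_val and boxes the loaded double
// (left in fpRegT0 by emitDoubleLoad) into the result JSValue registers.
// Type and bounds failures come back to the caller as slow-path jumps.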
inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(const Instruction* instruction, PatchableJump& badType)
{
#if USE(JSVALUE64)
    JSValueRegs result = JSValueRegs(regT0);
#else
    JSValueRegs result = JSValueRegs(regT1, regT0);
#endif
    JumpList slowCases = emitDoubleLoad(instruction, badType);
    boxDouble(fpRegT0, result);
    return slowCases;
}

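// Dispatches to the shape-specific load helper for the array mode that
// chooseArrayMode() selected from the profiling data.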
ALWAYS_INLINE MacroAssembler::JumpList JIT::emitLoadForArrayMode(const Instruction* currentInstruction, JITArrayMode arrayMode, PatchableJump& badType)
{
    switch (arrayMode) {
    case JITInt32:
        return emitInt32Load(currentInstruction, badType);
    case JITDouble:
        return emitDoubleLoad(currentInstruction, badType);
    case JITContiguous:
        return emitContiguousLoad(currentInstruction, badType);
    case JITArrayStorage:
        return emitArrayStorageLoad(currentInstruction, badType);
    default:
        break;
    }
    RELEASE_ASSERT_NOT_REACHED();
    return MacroAssembler::JumpList();
}

inline MacroAssembler::JumpList JIT::emitContiguousGetByVal(const Instruction* instruction, PatchableJump& badType, IndexingType expectedShape)
{
    return emitContiguousLoad(instruction, badType, expectedShape);
}

inline MacroAssembler::JumpList JIT::emitArrayStorageGetByVal(const Instruction* instruction, PatchableJump& badType)
{
    return emitArrayStorageLoad(instruction, badType);
}

ALWAYS_INLINE bool JIT::isOperandConstantDouble(int src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(int src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}

ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, int entry)
{
#if USE(JSVALUE32_64)
    store32(TrustedImm32(JSValue::Int32Tag), tagFor(entry));
    store32(from, payloadFor(entry));
#else
    store64(from, addressFor(entry));
#endif
}

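// Extracts the sole character of a one-character string into dst. Non-strings,
// rope strings, and strings of any other length are appended to the failure list.
// Handles both 8-bit and 16-bit string storage; clobbers regT1.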
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
    failures.append(branchIfNotString(src));
    loadPtr(MacroAssembler::Address(src, JSString::offsetOfValue()), dst);
    failures.append(branchIfRopeStringImpl(dst));
    failures.append(branch32(NotEqual, MacroAssembler::Address(dst, StringImpl::lengthMemoryOffset()), TrustedImm32(1)));
    loadPtr(MacroAssembler::Address(dst, StringImpl::flagsOffset()), regT1);
    loadPtr(MacroAssembler::Address(dst, StringImpl::dataOffset()), dst);

    JumpList is16Bit;
    JumpList cont8Bit;
    is16Bit.append(branchTest32(Zero, regT1, TrustedImm32(StringImpl::flagIs8Bit())));
    load8(MacroAssembler::Address(dst, 0), dst);
    cont8Bit.append(jump());
    is16Bit.link(this);
    load16(MacroAssembler::Address(dst, 0), dst);
    cont8Bit.link(this);
}

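// Emits an unlinked near call (or near tail call, below) and records it in m_calls
// so the link phase can bind it to the real target.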
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr<NoPtrTag> target)
{
    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, FunctionPtr<OperationPtrTag>(target.retagged<OperationPtrTag>())));
    return nakedCall;
}

ALWAYS_INLINE JIT::Call JIT::emitNakedTailCall(CodePtr<NoPtrTag> target)
{
    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
    Call nakedCall = nearTailCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, FunctionPtr<OperationPtrTag>(target.retagged<OperationPtrTag>())));
    return nakedCall;
}

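// Stores the current call site index into the call frame header and publishes the
// frame to VM::topCallFrame so C++ code called from here can walk the JS stack.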
ALWAYS_INLINE void JIT::updateTopCallFrame()
{
    ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
#if USE(JSVALUE32_64)
    const Instruction* instruction = m_codeBlock->instructions().at(m_bytecodeOffset).ptr();
    uint32_t locationBits = CallSiteIndex(instruction).bits();
#else
    uint32_t locationBits = CallSiteIndex(m_bytecodeOffset).bits();
#endif
    store32(TrustedImm32(locationBits), tagFor(CallFrameSlot::argumentCount));

    // FIXME: It's not clear that this is needed. JITOperations tend to update the top call frame on
    // the C++ side.
    // https://bugs.webkit.org/show_bug.cgi?id=155693
    storePtr(callFrameRegister, &m_vm->topCallFrame);
}

ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheck(const FunctionPtr<CFunctionPtrTag> function)
{
    updateTopCallFrame();
    MacroAssembler::Call call = appendCall(function);
    exceptionCheck();
    return call;
}

#if OS(WINDOWS) && CPU(X86_64)
ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckAndSlowPathReturnType(const FunctionPtr<CFunctionPtrTag> function)
{
    updateTopCallFrame();
    MacroAssembler::Call call = appendCallWithSlowPathReturnType(function);
    exceptionCheck();
    return call;
}
#endif

ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithCallFrameRollbackOnException(const FunctionPtr<CFunctionPtrTag> function)
{
    updateTopCallFrame(); // The callee is responsible for setting topCallFrame to its caller's frame.
    MacroAssembler::Call call = appendCall(function);
    exceptionCheckWithCallFrameRollback();
    return call;
}

ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckSetJSValueResult(const FunctionPtr<CFunctionPtrTag> function, int dst)
{
    MacroAssembler::Call call = appendCallWithExceptionCheck(function);
#if USE(JSVALUE64)
    emitPutVirtualRegister(dst, returnValueGPR);
#else
    emitStore(dst, returnValueGPR2, returnValueGPR);
#endif
    return call;
}

template<typename Metadata>
ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckSetJSValueResultWithProfile(Metadata& metadata, const FunctionPtr<CFunctionPtrTag> function, int dst)
{
    MacroAssembler::Call call = appendCallWithExceptionCheck(function);
    emitValueProfilingSite(metadata);
#if USE(JSVALUE64)
    emitPutVirtualRegister(dst, returnValueGPR);
#else
    emitStore(dst, returnValueGPR2, returnValueGPR);
#endif
    return call;
}

ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}

ALWAYS_INLINE void JIT::linkAllSlowCasesForBytecodeOffset(Vector<SlowCaseEntry>& slowCases, Vector<SlowCaseEntry>::iterator& iter, unsigned bytecodeOffset)
{
    while (iter != slowCases.end() && iter->to == bytecodeOffset)
        linkSlowCase(iter);
}

ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase(const JumpList& jumpList)
{
    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    for (const Jump& jump : jumpList.jumps())
        m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase()
{
    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    Jump emptyJump; // Doing it this way to make Windows happy.
    m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}

ALWAYS_INLINE void JIT::addJump(const JumpList& jumpList, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    for (auto& jump : jumpList.jumps())
        addJump(jump, relativeOffset);
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}

#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}
#endif

#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, int32_t count)
{
    add64(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
}
#endif

#if ENABLE(OPCODE_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(const Instruction* instruction, bool inHostFunction)
{
    move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(const Instruction* instruction, bool inHostFunction)
{
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif

ALWAYS_INLINE bool JIT::isOperandConstantChar(int src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}

inline void JIT::emitValueProfilingSite(ValueProfile& valueProfile)
{
    ASSERT(shouldEmitProfiling());

    const RegisterID value = regT0;
#if USE(JSVALUE32_64)
    const RegisterID valueTag = regT1;
#endif

    // We're in a simple configuration: only one bucket, so we can just do a direct
    // store.
#if USE(JSVALUE64)
    store64(value, valueProfile.m_buckets);
#else
    EncodedValueDescriptor* descriptor = bitwise_cast<EncodedValueDescriptor*>(valueProfile.m_buckets);
    store32(value, &descriptor->asBits.payload);
    store32(valueTag, &descriptor->asBits.tag);
#endif
}

template<typename Op>
inline std::enable_if_t<std::is_same<decltype(Op::Metadata::m_profile), ValueProfile>::value, void> JIT::emitValueProfilingSiteIfProfiledOpcode(Op bytecode)
{
    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
}

inline void JIT::emitValueProfilingSiteIfProfiledOpcode(...) { }

template<typename Metadata>
inline void JIT::emitValueProfilingSite(Metadata& metadata)
{
    if (!shouldEmitProfiling())
        return;
    emitValueProfilingSite(metadata.m_profile);
}

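// When profiling is enabled, records the cell's structure ID in the array profile;
// in all cases, leaves the cell's indexing type byte in the indexingType register
// for the caller to inspect.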
inline void JIT::emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile* arrayProfile)
{
    if (shouldEmitProfiling()) {
        load32(MacroAssembler::Address(cell, JSCell::structureIDOffset()), indexingType);
        store32(indexingType, arrayProfile->addressOfLastSeenStructureID());
    }

    load8(Address(cell, JSCell::indexingTypeAndMiscOffset()), indexingType);
}

inline void JIT::emitArrayProfileStoreToHoleSpecialCase(ArrayProfile* arrayProfile)
{
    store8(TrustedImm32(1), arrayProfile->addressOfMayStoreToHole());
}

inline void JIT::emitArrayProfileOutOfBoundsSpecialCase(ArrayProfile* arrayProfile)
{
    store8(TrustedImm32(1), arrayProfile->addressOfOutOfBounds());
}

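// Picks the array mode to compile for from the shapes the profile has observed,
// preferring the more specialized Double and Int32 paths and falling back to
// Contiguous as the catch-all.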
inline JITArrayMode JIT::chooseArrayMode(ArrayProfile* profile)
{
    auto arrayProfileSaw = [] (ArrayModes arrayModes, IndexingType capability) {
        return arrayModesIncludeIgnoringTypedArrays(arrayModes, capability);
    };

    ConcurrentJSLocker locker(m_codeBlock->m_lock);
    profile->computeUpdatedPrediction(locker, m_codeBlock);
    ArrayModes arrayModes = profile->observedArrayModes(locker);
    if (arrayProfileSaw(arrayModes, DoubleShape))
        return JITDouble;
    if (arrayProfileSaw(arrayModes, Int32Shape))
        return JITInt32;
    if (arrayProfileSaw(arrayModes, ArrayStorageShape))
        return JITArrayStorage;
    return JITContiguous;
}

ALWAYS_INLINE int32_t JIT::getOperandConstantInt(int src)
{
    return getConstantOperand(src).asInt32();
}

ALWAYS_INLINE double JIT::getOperandConstantDouble(int src)
{
    return getConstantOperand(src).asDouble();
}

ALWAYS_INLINE void JIT::emitInitRegister(int dst)
{
    storeTrustedValue(jsUndefined(), addressFor(dst));
}

#if USE(JSVALUE32_64)

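// On 32-bit targets a JSValue is split into a 32-bit tag and a 32-bit payload,
// stored in separate halves of the call frame slot; the helpers below load and
// store the two halves individually.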
inline void JIT::emitLoadTag(int index, RegisterID tag)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).tag()), tag);
        return;
    }

    load32(tagFor(index), tag);
}

inline void JIT::emitLoadPayload(int index, RegisterID payload)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).payload()), payload);
        return;
    }

    load32(payloadFor(index), payload);
}

inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
{
    move(Imm32(v.payload()), payload);
    move(Imm32(v.tag()), tag);
}

ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, JSValueRegs dst)
{
    emitLoad(src, dst.tagGPR(), dst.payloadGPR());
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, JSValueRegs from)
{
    emitStore(dst, from.tagGPR(), from.payloadGPR());
}

inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
    RELEASE_ASSERT(tag != payload);

    if (base == callFrameRegister) {
        RELEASE_ASSERT(payload != base);
        emitLoadPayload(index, payload);
        emitLoadTag(index, tag);
        return;
    }

    VirtualRegister target { index };
    if (payload == base) { // avoid stomping base
        load32(tagFor(target, base), tag);
        load32(payloadFor(target, base), payload);
        return;
    }

    load32(payloadFor(target, base), payload);
    load32(tagFor(target, base), tag);
}

inline void JIT::emitLoad2(int index1, RegisterID tag1, RegisterID payload1, int index2, RegisterID tag2, RegisterID payload2)
{
    emitLoad(index2, tag2, payload2);
    emitLoad(index1, tag1, payload1);
}

inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(TrustedImmPtr(&inConstantPool), value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
    } else
        convertInt32ToDouble(payloadFor(index), value);
}

inline void JIT::emitStore(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
    VirtualRegister target { index };
    store32(payload, payloadFor(target, base));
    store32(tag, tagFor(target, base));
}

inline void JIT::emitStoreInt32(int index, RegisterID payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index));
}

inline void JIT::emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index));
}

inline void JIT::emitStoreCell(int index, RegisterID payload, bool indexIsCell)
{
    store32(payload, payloadFor(index));
    if (!indexIsCell)
        store32(TrustedImm32(JSValue::CellTag), tagFor(index));
}

inline void JIT::emitStoreBool(int index, RegisterID payload, bool indexIsBool)
{
    store32(payload, payloadFor(index));
    if (!indexIsBool)
        store32(TrustedImm32(JSValue::BooleanTag), tagFor(index));
}

inline void JIT::emitStoreDouble(int index, FPRegisterID value)
{
    storeDouble(value, addressFor(index));
}

inline void JIT::emitStore(int index, const JSValue constant, RegisterID base)
{
    VirtualRegister target { index };
    store32(Imm32(constant.payload()), payloadFor(target, base));
    store32(Imm32(constant.tag()), tagFor(target, base));
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
    }
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(branchIfNotCell(tag));
    }
}

ALWAYS_INLINE bool JIT::isOperandConstantInt(int src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

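// If either operand is a constant int32, returns true with the constant's value in
// `constant` and the remaining operand in `op`.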
ALWAYS_INLINE bool JIT::getOperandConstantInt(int op1, int op2, int& op, int32_t& constant)
{
    if (isOperandConstantInt(op1)) {
        constant = getConstantOperand(op1).asInt32();
        op = op2;
        return true;
    }

    if (isOperandConstantInt(op2)) {
        constant = getConstantOperand(op2).asInt32();
        op = op1;
        return true;
    }

    return false;
}

#else // USE(JSVALUE32_64)

// Loads a virtual register (a call frame slot or a constant-pool entry) into a hardware register.
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        if (!value.isNumber())
            move(TrustedImm64(JSValue::encode(value)), dst);
        else
            move(Imm64(JSValue::encode(value)), dst);
        return;
    }

    load64(addressFor(src), dst);
}

ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, JSValueRegs dst)
{
    emitGetVirtualRegister(src, dst.payloadGPR());
}

ALWAYS_INLINE void JIT::emitGetVirtualRegister(VirtualRegister src, RegisterID dst)
{
    emitGetVirtualRegister(src.offset(), dst);
}

ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    emitGetVirtualRegister(src1, dst1);
    emitGetVirtualRegister(src2, dst2);
}

ALWAYS_INLINE void JIT::emitGetVirtualRegisters(VirtualRegister src1, RegisterID dst1, VirtualRegister src2, RegisterID dst2)
{
    emitGetVirtualRegisters(src1.offset(), dst1, src2.offset(), dst2);
}

ALWAYS_INLINE bool JIT::isOperandConstantInt(int src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, RegisterID from)
{
    store64(from, addressFor(dst));
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, JSValueRegs from)
{
    emitPutVirtualRegister(dst, from.payloadGPR());
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(VirtualRegister dst, RegisterID from)
{
    emitPutVirtualRegister(dst.offset(), from);
}

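// Under the JSVALUE64 encoding a cell pointer has no tag bits set, so OR-ing the
// two values yields a value with no tag bits only when both operands are cells.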
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    or64(reg2, scratch);
    return branchIfCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(branchIfCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(branchIfNotCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}

inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(TrustedImmPtr(&inConstantPool), value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        ASSERT(isOperandConstantInt(index));
        convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
    } else
        convertInt32ToDouble(addressFor(index), value);
}

ALWAYS_INLINE JIT::PatchableJump JIT::emitPatchableJumpIfNotInt(RegisterID reg)
{
    return patchableBranch64(Below, reg, tagTypeNumberRegister);
}

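// An int32 carries the full TagTypeNumber pattern in its high bits, so AND-ing the
// two values preserves that pattern only if both operands are int32s.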
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotInt(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    and64(reg2, scratch);
    return branchIfNotInt32(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotInt(RegisterID reg)
{
    addSlowCase(branchIfNotInt32(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotInt(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotInt(reg1, reg2, scratch));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotNumber(RegisterID reg)
{
    addSlowCase(branchIfNotNumber(reg));
}

#endif // USE(JSVALUE32_64)

ALWAYS_INLINE int JIT::jumpTarget(const Instruction* instruction, int target)
{
    if (target)
        return target;
    return m_codeBlock->outOfLineJumpOffset(instruction);
}

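// Returns a cached copy of the bytecode's GetPutInfo, copying it out of the metadata
// on first use (keyed by metadata ID) so later codegen reads a stable snapshot even
// if the metadata is mutated at run time.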
ALWAYS_INLINE GetPutInfo JIT::copiedGetPutInfo(OpPutToScope bytecode)
{
    unsigned key = bytecode.m_metadataID + 1; // HashMap doesn't like 0 as a key
    auto iterator = m_copiedGetPutInfos.find(key);
    if (iterator != m_copiedGetPutInfos.end())
        return GetPutInfo(iterator->value);
    GetPutInfo getPutInfo = bytecode.metadata(m_codeBlock).m_getPutInfo;
    m_copiedGetPutInfos.add(key, getPutInfo.operand());
    return getPutInfo;
}

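// Likewise caches a copy of the bytecode's ArithProfile, keyed by opcode ID and
// metadata ID.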
template<typename BinaryOp>
ALWAYS_INLINE ArithProfile JIT::copiedArithProfile(BinaryOp bytecode)
{
    uint64_t key = static_cast<uint64_t>(BinaryOp::opcodeID) << 32 | static_cast<uint64_t>(bytecode.m_metadataID);
    auto iterator = m_copiedArithProfiles.find(key);
    if (iterator != m_copiedArithProfiles.end())
        return iterator->value;
    ArithProfile arithProfile = bytecode.metadata(m_codeBlock).m_arithProfile;
    m_copiedArithProfiles.add(key, arithProfile);
    return arithProfile;
}

} // namespace JSC

#endif // ENABLE(JIT)