/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(JIT)

// We've run into some problems where changing the size of the class JIT leads to
// performance fluctuations. Try forcing alignment in an attempt to stabilize this.
#if COMPILER(GCC_COMPATIBLE)
#define JIT_CLASS_ALIGNMENT alignas(32)
#else
#define JIT_CLASS_ALIGNMENT
#endif

#define ASSERT_JIT_OFFSET(actual, expected) ASSERT_WITH_MESSAGE(actual == expected, "JIT Offset \"%s\" should be %d, not %d.\n", #expected, static_cast<int>(expected), static_cast<int>(actual));

#include "CodeBlock.h"
#include "CommonSlowPaths.h"
#include "JITDisassembler.h"
#include "JITInlineCacheGenerator.h"
#include "JITMathIC.h"
#include "JITRightShiftGenerator.h"
#include "JSInterfaceJIT.h"
#include "PCToCodeOriginMap.h"
#include "UnusedPointer.h"

namespace JSC {

enum OpcodeID : unsigned;

class ArrayAllocationProfile;
class BlockDirectory;
class CallLinkInfo;
class CodeBlock;
class FunctionExecutable;
class Identifier;
class Interpreter;
class JIT;
class Register;
class StructureChain;
class StructureStubInfo;

struct Instruction;
struct OperandTypes;
struct SimpleJumpTable;
struct StringJumpTable;

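// A call emitted during the main pass, remembered so that link() can bind the
// call site to its target operation.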
struct CallRecord {
    MacroAssembler::Call from;
    BytecodeIndex bytecodeIndex;
    FunctionPtr<OperationPtrTag> callee;

    CallRecord()
    {
    }

    CallRecord(MacroAssembler::Call from, BytecodeIndex bytecodeIndex, FunctionPtr<OperationPtrTag> callee)
        : from(from)
        , bytecodeIndex(bytecodeIndex)
        , callee(callee)
    {
    }
};

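// A branch recorded during code generation whose target bytecode offset is
// resolved during the link pass.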
struct JumpTable {
    MacroAssembler::Jump from;
    unsigned toBytecodeOffset;

    JumpTable(MacroAssembler::Jump f, unsigned t)
        : from(f)
        , toBytecodeOffset(t)
    {
    }
};

struct SlowCaseEntry {
    MacroAssembler::Jump from;
    BytecodeIndex to;

    SlowCaseEntry(MacroAssembler::Jump f, BytecodeIndex t)
        : from(f)
        , to(t)
    {
    }
};

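// Discriminated union over the two switch jump-table kinds: 'type' selects
// which member of 'jumpTable' is active (String uses stringJumpTable; Immediate
// and Character use simpleJumpTable).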
struct SwitchRecord {
    enum Type {
        Immediate,
        Character,
        String
    };

    Type type;

    union {
        SimpleJumpTable* simpleJumpTable;
        StringJumpTable* stringJumpTable;
    } jumpTable;

    BytecodeIndex bytecodeIndex;
    unsigned defaultOffset;

    SwitchRecord(SimpleJumpTable* jumpTable, BytecodeIndex bytecodeIndex, unsigned defaultOffset, Type type)
        : type(type)
        , bytecodeIndex(bytecodeIndex)
        , defaultOffset(defaultOffset)
    {
        this->jumpTable.simpleJumpTable = jumpTable;
    }

    SwitchRecord(StringJumpTable* jumpTable, BytecodeIndex bytecodeIndex, unsigned defaultOffset)
        : type(String)
        , bytecodeIndex(bytecodeIndex)
        , defaultOffset(defaultOffset)
    {
        this->jumpTable.stringJumpTable = jumpTable;
    }
};

struct ByValCompilationInfo {
    ByValCompilationInfo() { }

    ByValCompilationInfo(ByValInfo* byValInfo, BytecodeIndex bytecodeIndex, MacroAssembler::PatchableJump notIndexJump, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, ArrayProfile* arrayProfile, MacroAssembler::Label doneTarget, MacroAssembler::Label nextHotPathTarget)
        : byValInfo(byValInfo)
        , bytecodeIndex(bytecodeIndex)
        , notIndexJump(notIndexJump)
        , badTypeJump(badTypeJump)
        , arrayMode(arrayMode)
        , arrayProfile(arrayProfile)
        , doneTarget(doneTarget)
        , nextHotPathTarget(nextHotPathTarget)
    {
    }

    ByValInfo* byValInfo;
    BytecodeIndex bytecodeIndex;
    MacroAssembler::PatchableJump notIndexJump;
    MacroAssembler::PatchableJump badTypeJump;
    JITArrayMode arrayMode;
    ArrayProfile* arrayProfile;
    MacroAssembler::Label doneTarget;
    MacroAssembler::Label nextHotPathTarget;
    MacroAssembler::Label slowPathTarget;
    MacroAssembler::Call returnAddress;
};

struct CallCompilationInfo {
    MacroAssembler::DataLabelPtr hotPathBegin;
    MacroAssembler::Call hotPathOther;
    MacroAssembler::Call callReturnLocation;
    CallLinkInfo* callLinkInfo;
};

void ctiPatchCallByReturnAddress(ReturnAddressPtr, FunctionPtr<CFunctionPtrTag> newCalleeFunction);

class JIT_CLASS_ALIGNMENT JIT : private JSInterfaceJIT {
    friend class JITSlowPathCall;
    friend class JITStubCall;

    using MacroAssembler::Jump;
    using MacroAssembler::JumpList;
    using MacroAssembler::Label;

    static constexpr uintptr_t patchGetByIdDefaultStructure = unusedPointer;
    static constexpr int patchGetByIdDefaultOffset = 0;
    // Magic number: the initial offset must not be representable as a signed 8-bit value,
    // or the X86Assembler will compress the displacement, and we may not be able to fit a
    // patched offset.
    static constexpr int patchPutByIdDefaultOffset = 256;

public:
    JIT(VM&, CodeBlock* = nullptr, BytecodeIndex loopOSREntryBytecodeIndex = BytecodeIndex(0));
    ~JIT();

    VM& vm() { return *JSInterfaceJIT::vm(); }

    void compileWithoutLinking(JITCompilationEffort);
    CompilationResult link();

    void doMainThreadPreparationBeforeCompile();

    static CompilationResult compile(VM& vm, CodeBlock* codeBlock, JITCompilationEffort effort, BytecodeIndex bytecodeIndex = BytecodeIndex(0))
    {
        return JIT(vm, codeBlock, bytecodeIndex).privateCompile(effort);
    }

    static void compilePutByVal(const ConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
    {
        JIT jit(vm, codeBlock);
        jit.m_bytecodeIndex = byValInfo->bytecodeIndex;
        jit.privateCompilePutByVal<OpPutByVal>(locker, byValInfo, returnAddress, arrayMode);
    }

    static void compileDirectPutByVal(const ConcurrentJSLocker& locker, VM& vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
    {
        JIT jit(vm, codeBlock);
        jit.m_bytecodeIndex = byValInfo->bytecodeIndex;
        jit.privateCompilePutByVal<OpPutByValDirect>(locker, byValInfo, returnAddress, arrayMode);
    }

    template<typename Op>
    static void compilePutByValWithCachedId(VM& vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName)
    {
        JIT jit(vm, codeBlock);
        jit.m_bytecodeIndex = byValInfo->bytecodeIndex;
        jit.privateCompilePutByValWithCachedId<Op>(byValInfo, returnAddress, putKind, propertyName);
    }

    static void compileHasIndexedProperty(VM& vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
    {
        JIT jit(vm, codeBlock);
        jit.m_bytecodeIndex = byValInfo->bytecodeIndex;
        jit.privateCompileHasIndexedProperty(byValInfo, returnAddress, arrayMode);
    }

    static unsigned frameRegisterCountFor(CodeBlock*);
    static int stackPointerOffsetFor(CodeBlock*);

    JS_EXPORT_PRIVATE static HashMap<CString, Seconds> compileTimeStats();
    JS_EXPORT_PRIVATE static Seconds totalCompileTime();

private:
    void privateCompileMainPass();
    void privateCompileLinkPass();
    void privateCompileSlowCases();
    CompilationResult privateCompile(JITCompilationEffort);

    void privateCompileGetByVal(const ConcurrentJSLocker&, ByValInfo*, ReturnAddressPtr, JITArrayMode);
    void privateCompileGetByValWithCachedId(ByValInfo*, ReturnAddressPtr, const Identifier&);
    template<typename Op>
    void privateCompilePutByVal(const ConcurrentJSLocker&, ByValInfo*, ReturnAddressPtr, JITArrayMode);
    template<typename Op>
    void privateCompilePutByValWithCachedId(ByValInfo*, ReturnAddressPtr, PutKind, const Identifier&);

    void privateCompileHasIndexedProperty(ByValInfo*, ReturnAddressPtr, JITArrayMode);

    void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);

    // Add a call out from JIT code, without an exception check.
    Call appendCall(const FunctionPtr<CFunctionPtrTag> function)
    {
        Call functionCall = call(OperationPtrTag);
        m_calls.append(CallRecord(functionCall, m_bytecodeIndex, function.retagged<OperationPtrTag>()));
        return functionCall;
    }

#if OS(WINDOWS) && CPU(X86_64)
    Call appendCallWithSlowPathReturnType(const FunctionPtr<CFunctionPtrTag> function)
    {
        Call functionCall = callWithSlowPathReturnType(OperationPtrTag);
        m_calls.append(CallRecord(functionCall, m_bytecodeIndex, function.retagged<OperationPtrTag>()));
        return functionCall;
    }
#endif

    void exceptionCheck(Jump jumpToHandler)
    {
        m_exceptionChecks.append(jumpToHandler);
    }

    void exceptionCheck()
    {
        m_exceptionChecks.append(emitExceptionCheck(vm()));
    }

    void exceptionCheckWithCallFrameRollback()
    {
        m_exceptionChecksWithCallFrameRollback.append(emitExceptionCheck(vm()));
    }

    void privateCompileExceptionHandlers();

    void addSlowCase(Jump);
    void addSlowCase(const JumpList&);
    void addSlowCase();
    void addJump(Jump, int);
    void addJump(const JumpList&, int);
    void emitJumpSlowToHot(Jump, int);

    template<typename Op>
    void compileOpCall(const Instruction*, unsigned callLinkInfoIndex);
    template<typename Op>
    void compileOpCallSlowCase(const Instruction*, Vector<SlowCaseEntry>::iterator&, unsigned callLinkInfoIndex);
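    // Two mutually exclusive overloads of compileSetupFrame, selected by
    // enable_if on the opcode ID: one for fixed-argument call opcodes, one for
    // the varargs/argument-forwarding forms.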
    template<typename Op>
    std::enable_if_t<
        Op::opcodeID != op_call_varargs && Op::opcodeID != op_construct_varargs
        && Op::opcodeID != op_tail_call_varargs && Op::opcodeID != op_tail_call_forward_arguments
    , void> compileSetupFrame(const Op&, CallLinkInfo*);

    template<typename Op>
    std::enable_if_t<
        Op::opcodeID == op_call_varargs || Op::opcodeID == op_construct_varargs
        || Op::opcodeID == op_tail_call_varargs || Op::opcodeID == op_tail_call_forward_arguments
    , void> compileSetupFrame(const Op&, CallLinkInfo*);

    template<typename Op>
    bool compileTailCall(const Op&, CallLinkInfo*, unsigned callLinkInfoIndex);
    template<typename Op>
    bool compileCallEval(const Op&);
    void compileCallEvalSlowCase(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    template<typename Op>
    void emitPutCallResult(const Op&);

    enum class CompileOpStrictEqType { StrictEq, NStrictEq };
    template<typename Op>
    void compileOpStrictEq(const Instruction*, CompileOpStrictEqType);
    template<typename Op>
    void compileOpStrictEqJump(const Instruction*, CompileOpStrictEqType);
    enum class CompileOpEqType { Eq, NEq };
    void compileOpEqJumpSlow(Vector<SlowCaseEntry>::iterator&, CompileOpEqType, int jumpTarget);
    bool isOperandConstantDouble(int src);

    void emitLoadDouble(int index, FPRegisterID value);
    void emitLoadInt32ToDouble(int index, FPRegisterID value);

    enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterBase, ShouldFilterValue, ShouldFilterBaseAndValue };
    // value register in write barrier is used before any scratch registers
    // so may safely be the same as either of the scratch registers.
    void emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode);
    void emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode);
    void emitWriteBarrier(JSCell* owner);

    // This assumes that the value to profile is in regT0 and that regT3 is available for
    // scratch.
    void emitValueProfilingSite(ValueProfile&);
    template<typename Metadata> void emitValueProfilingSite(Metadata&);
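    // A C-style variadic function ranks last in overload resolution, so this
    // catch-all is selected only for opcodes whose metadata has no m_profile;
    // profiled opcodes pick the constrained overload below.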
    void emitValueProfilingSiteIfProfiledOpcode(...);
    template<typename Op>
    std::enable_if_t<std::is_same<decltype(Op::Metadata::m_profile), ValueProfile>::value, void>
    emitValueProfilingSiteIfProfiledOpcode(Op bytecode);

    void emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile*);
    void emitArrayProfileStoreToHoleSpecialCase(ArrayProfile*);
    void emitArrayProfileOutOfBoundsSpecialCase(ArrayProfile*);

    JITArrayMode chooseArrayMode(ArrayProfile*);

    // Property is in regT1, base is in regT0. regT2 contains indexing type.
    // Property is int-checked and zero extended. Base is cell checked.
    // Structure is already profiled. Returns the slow cases. Fall-through
    // case contains result in regT0, and it is not yet profiled.
    JumpList emitInt32Load(const Instruction* instruction, PatchableJump& badType) { return emitContiguousLoad(instruction, badType, Int32Shape); }
    JumpList emitDoubleLoad(const Instruction*, PatchableJump& badType);
    JumpList emitContiguousLoad(const Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape);
    JumpList emitArrayStorageLoad(const Instruction*, PatchableJump& badType);
    JumpList emitLoadForArrayMode(const Instruction*, JITArrayMode, PatchableJump& badType);

    // Property is in regT1, base is in regT0. regT2 contains indexing type.
    // The value to store is not yet loaded. Property is int-checked and
    // zero-extended. Base is cell checked. Structure is already profiled.
    // Returns the slow cases.
    template<typename Op>
    JumpList emitInt32PutByVal(Op bytecode, PatchableJump& badType)
    {
        return emitGenericContiguousPutByVal(bytecode, badType, Int32Shape);
    }
    template<typename Op>
    JumpList emitDoublePutByVal(Op bytecode, PatchableJump& badType)
    {
        return emitGenericContiguousPutByVal(bytecode, badType, DoubleShape);
    }
    template<typename Op>
    JumpList emitContiguousPutByVal(Op bytecode, PatchableJump& badType)
    {
        return emitGenericContiguousPutByVal(bytecode, badType);
    }
    template<typename Op>
    JumpList emitGenericContiguousPutByVal(Op, PatchableJump& badType, IndexingType indexingShape = ContiguousShape);
    template<typename Op>
    JumpList emitArrayStoragePutByVal(Op, PatchableJump& badType);
    template<typename Op>
    JumpList emitIntTypedArrayPutByVal(Op, PatchableJump& badType, TypedArrayType);
    template<typename Op>
    JumpList emitFloatTypedArrayPutByVal(Op, PatchableJump& badType, TypedArrayType);

    // Identifier check helper for GetByVal and PutByVal.
    void emitByValIdentifierCheck(ByValInfo*, RegisterID cell, RegisterID scratch, const Identifier&, JumpList& slowCases);

    template<typename Op>
    JITPutByIdGenerator emitPutByValWithCachedId(ByValInfo*, Op, PutKind, const Identifier&, JumpList& doneCases, JumpList& slowCases);

    enum FinalObjectMode { MayBeFinal, KnownNotFinal };

    void emitGetVirtualRegister(int src, JSValueRegs dst);
    void emitPutVirtualRegister(int dst, JSValueRegs src);

    int32_t getOperandConstantInt(int src);
    double getOperandConstantDouble(int src);

#if USE(JSVALUE32_64)
    bool getOperandConstantInt(int op1, int op2, int& op, int32_t& constant);

    void emitLoadTag(int index, RegisterID tag);
    void emitLoadPayload(int index, RegisterID payload);

    void emitLoad(const JSValue& v, RegisterID tag, RegisterID payload);
    void emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
    void emitLoad2(int index1, RegisterID tag1, RegisterID payload1, int index2, RegisterID tag2, RegisterID payload2);

    void emitStore(int index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
    void emitStore(int index, const JSValue constant, RegisterID base = callFrameRegister);
    void emitStoreInt32(int index, RegisterID payload, bool indexIsInt32 = false);
    void emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32 = false);
    void emitStoreCell(int index, RegisterID payload, bool indexIsCell = false);
    void emitStoreBool(int index, RegisterID payload, bool indexIsBool = false);
    void emitStoreDouble(int index, FPRegisterID value);

    void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex);
    void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag);

    void compileGetByIdHotPath(const Identifier*);

    // Arithmetic opcode helpers
    template <typename Op>
    void emitBinaryDoubleOp(const Instruction*, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);

#else // USE(JSVALUE32_64)
    void emitGetVirtualRegister(int src, RegisterID dst);
    void emitGetVirtualRegister(VirtualRegister src, RegisterID dst);
    void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
    void emitGetVirtualRegisters(VirtualRegister src1, RegisterID dst1, VirtualRegister src2, RegisterID dst2);
    void emitPutVirtualRegister(int dst, RegisterID from = regT0);
    void emitPutVirtualRegister(VirtualRegister dst, RegisterID from = regT0);
    void emitStoreCell(int dst, RegisterID payload, bool /* only used in JSVALUE32_64 */ = false)
    {
        emitPutVirtualRegister(dst, payload);
    }
    void emitStoreCell(VirtualRegister dst, RegisterID payload)
    {
        emitPutVirtualRegister(dst, payload);
    }

    Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
    void emitJumpSlowCaseIfJSCell(RegisterID);
    void emitJumpSlowCaseIfNotJSCell(RegisterID);
    void emitJumpSlowCaseIfNotJSCell(RegisterID, int vReg);
    Jump emitJumpIfNotInt(RegisterID, RegisterID, RegisterID scratch);
    PatchableJump emitPatchableJumpIfNotInt(RegisterID);
    void emitJumpSlowCaseIfNotInt(RegisterID);
    void emitJumpSlowCaseIfNotNumber(RegisterID);
    void emitJumpSlowCaseIfNotInt(RegisterID, RegisterID, RegisterID scratch);

    void compileGetByIdHotPath(int baseVReg, const Identifier*);

#endif // USE(JSVALUE32_64)

    template<typename Op>
    void emit_compareAndJump(const Instruction*, RelationalCondition);
    template<typename Op>
    void emit_compareUnsigned(const Instruction*, RelationalCondition);
    template<typename Op>
    void emit_compareUnsignedAndJump(const Instruction*, RelationalCondition);
    template<typename Op>
    void emit_compareAndJumpSlow(const Instruction*, DoubleCondition, size_t (JIT_OPERATION *operation)(JSGlobalObject*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator&);

    void assertStackPointerOffset();

    void emit_op_add(const Instruction*);
    void emit_op_bitand(const Instruction*);
    void emit_op_bitor(const Instruction*);
    void emit_op_bitxor(const Instruction*);
    void emit_op_bitnot(const Instruction*);
    void emit_op_call(const Instruction*);
    void emit_op_tail_call(const Instruction*);
    void emit_op_call_eval(const Instruction*);
    void emit_op_call_varargs(const Instruction*);
    void emit_op_tail_call_varargs(const Instruction*);
    void emit_op_tail_call_forward_arguments(const Instruction*);
    void emit_op_construct_varargs(const Instruction*);
    void emit_op_catch(const Instruction*);
    void emit_op_construct(const Instruction*);
    void emit_op_create_this(const Instruction*);
    void emit_op_to_this(const Instruction*);
    void emit_op_get_argument(const Instruction*);
    void emit_op_argument_count(const Instruction*);
    void emit_op_get_rest_length(const Instruction*);
    void emit_op_check_tdz(const Instruction*);
    void emit_op_identity_with_profile(const Instruction*);
    void emit_op_debug(const Instruction*);
    void emit_op_del_by_id(const Instruction*);
    void emit_op_del_by_val(const Instruction*);
    void emit_op_div(const Instruction*);
    void emit_op_end(const Instruction*);
    void emit_op_enter(const Instruction*);
    void emit_op_get_scope(const Instruction*);
    void emit_op_eq(const Instruction*);
    void emit_op_eq_null(const Instruction*);
    void emit_op_below(const Instruction*);
    void emit_op_beloweq(const Instruction*);
    void emit_op_try_get_by_id(const Instruction*);
    void emit_op_get_by_id(const Instruction*);
    void emit_op_get_by_id_with_this(const Instruction*);
    void emit_op_get_by_id_direct(const Instruction*);
    void emit_op_get_by_val(const Instruction*);
    void emit_op_get_argument_by_val(const Instruction*);
    void emit_op_in_by_id(const Instruction*);
    void emit_op_init_lazy_reg(const Instruction*);
    void emit_op_overrides_has_instance(const Instruction*);
    void emit_op_instanceof(const Instruction*);
    void emit_op_instanceof_custom(const Instruction*);
    void emit_op_is_empty(const Instruction*);
    void emit_op_is_undefined(const Instruction*);
    void emit_op_is_undefined_or_null(const Instruction*);
    void emit_op_is_boolean(const Instruction*);
    void emit_op_is_number(const Instruction*);
    void emit_op_is_object(const Instruction*);
    void emit_op_is_cell_with_type(const Instruction*);
    void emit_op_jeq_null(const Instruction*);
    void emit_op_jfalse(const Instruction*);
    void emit_op_jmp(const Instruction*);
    void emit_op_jneq_null(const Instruction*);
    void emit_op_jundefined_or_null(const Instruction*);
    void emit_op_jnundefined_or_null(const Instruction*);
    void emit_op_jneq_ptr(const Instruction*);
    void emit_op_jless(const Instruction*);
    void emit_op_jlesseq(const Instruction*);
    void emit_op_jgreater(const Instruction*);
    void emit_op_jgreatereq(const Instruction*);
    void emit_op_jnless(const Instruction*);
    void emit_op_jnlesseq(const Instruction*);
    void emit_op_jngreater(const Instruction*);
    void emit_op_jngreatereq(const Instruction*);
    void emit_op_jeq(const Instruction*);
    void emit_op_jneq(const Instruction*);
    void emit_op_jstricteq(const Instruction*);
    void emit_op_jnstricteq(const Instruction*);
    void emit_op_jbelow(const Instruction*);
    void emit_op_jbeloweq(const Instruction*);
    void emit_op_jtrue(const Instruction*);
    void emit_op_loop_hint(const Instruction*);
    void emit_op_check_traps(const Instruction*);
    void emit_op_nop(const Instruction*);
    void emit_op_super_sampler_begin(const Instruction*);
    void emit_op_super_sampler_end(const Instruction*);
    void emit_op_lshift(const Instruction*);
    void emit_op_mod(const Instruction*);
    void emit_op_mov(const Instruction*);
    void emit_op_mul(const Instruction*);
    void emit_op_negate(const Instruction*);
    void emit_op_neq(const Instruction*);
    void emit_op_neq_null(const Instruction*);
    void emit_op_new_array(const Instruction*);
    void emit_op_new_array_with_size(const Instruction*);
    void emit_op_new_func(const Instruction*);
    void emit_op_new_func_exp(const Instruction*);
    void emit_op_new_generator_func(const Instruction*);
    void emit_op_new_generator_func_exp(const Instruction*);
    void emit_op_new_async_func(const Instruction*);
    void emit_op_new_async_func_exp(const Instruction*);
    void emit_op_new_async_generator_func(const Instruction*);
    void emit_op_new_async_generator_func_exp(const Instruction*);
    void emit_op_new_object(const Instruction*);
    void emit_op_new_regexp(const Instruction*);
    void emit_op_not(const Instruction*);
    void emit_op_nstricteq(const Instruction*);
    void emit_op_dec(const Instruction*);
    void emit_op_inc(const Instruction*);
    void emit_op_profile_type(const Instruction*);
    void emit_op_profile_control_flow(const Instruction*);
    void emit_op_get_parent_scope(const Instruction*);
    void emit_op_put_by_id(const Instruction*);
    template<typename Op = OpPutByVal>
    void emit_op_put_by_val(const Instruction*);
    void emit_op_put_by_val_direct(const Instruction*);
    void emit_op_put_getter_by_id(const Instruction*);
    void emit_op_put_setter_by_id(const Instruction*);
    void emit_op_put_getter_setter_by_id(const Instruction*);
    void emit_op_put_getter_by_val(const Instruction*);
    void emit_op_put_setter_by_val(const Instruction*);
    void emit_op_ret(const Instruction*);
    void emit_op_rshift(const Instruction*);
    void emit_op_set_function_name(const Instruction*);
    void emit_op_stricteq(const Instruction*);
    void emit_op_sub(const Instruction*);
    void emit_op_switch_char(const Instruction*);
    void emit_op_switch_imm(const Instruction*);
    void emit_op_switch_string(const Instruction*);
    void emit_op_tear_off_arguments(const Instruction*);
    void emit_op_throw(const Instruction*);
    void emit_op_to_number(const Instruction*);
    void emit_op_to_numeric(const Instruction*);
    void emit_op_to_string(const Instruction*);
    void emit_op_to_object(const Instruction*);
    void emit_op_to_primitive(const Instruction*);
    void emit_op_unexpected_load(const Instruction*);
    void emit_op_unsigned(const Instruction*);
    void emit_op_urshift(const Instruction*);
    void emit_op_has_structure_property(const Instruction*);
    void emit_op_has_indexed_property(const Instruction*);
    void emit_op_get_direct_pname(const Instruction*);
    void emit_op_enumerator_structure_pname(const Instruction*);
    void emit_op_enumerator_generic_pname(const Instruction*);
    void emit_op_get_internal_field(const Instruction*);
    void emit_op_put_internal_field(const Instruction*);
    void emit_op_log_shadow_chicken_prologue(const Instruction*);
    void emit_op_log_shadow_chicken_tail(const Instruction*);

    void emitSlow_op_add(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_call(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_tail_call(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_call_eval(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_call_varargs(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_tail_call_varargs(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_tail_call_forward_arguments(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_construct_varargs(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_construct(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_eq(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_get_callee(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_try_get_by_id(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_get_by_id(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_get_by_id_with_this(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_get_by_id_direct(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_get_by_val(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_get_argument_by_val(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_in_by_id(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_instanceof(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_instanceof_custom(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_jless(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_jlesseq(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_jgreater(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_jgreatereq(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_jnless(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_jnlesseq(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_jngreater(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_jngreatereq(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_jeq(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_jneq(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_jstricteq(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_jnstricteq(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_jtrue(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_loop_hint(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_check_traps(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_mod(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_mul(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_negate(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_neq(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_new_object(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_put_by_id(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_put_by_val(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_sub(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_has_indexed_property(const Instruction*, Vector<SlowCaseEntry>::iterator&);

    void emit_op_resolve_scope(const Instruction*);
    void emit_op_get_from_scope(const Instruction*);
    void emit_op_put_to_scope(const Instruction*);
    void emit_op_get_from_arguments(const Instruction*);
    void emit_op_put_to_arguments(const Instruction*);
    void emitSlow_op_get_from_scope(const Instruction*, Vector<SlowCaseEntry>::iterator&);
    void emitSlow_op_put_to_scope(const Instruction*, Vector<SlowCaseEntry>::iterator&);

    void emitSlowCaseCall(const Instruction*, Vector<SlowCaseEntry>::iterator&, SlowPathFunction);

    void emitRightShift(const Instruction*, bool isUnsigned);
    void emitRightShiftSlowCase(const Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned);

    template<typename Op>
    void emitNewFuncCommon(const Instruction*);
    template<typename Op>
    void emitNewFuncExprCommon(const Instruction*);
    void emitVarInjectionCheck(bool needsVarInjectionChecks);
    void emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth);
    void emitLoadWithStructureCheck(int scope, Structure** structureSlot);
#if USE(JSVALUE64)
    void emitGetVarFromPointer(JSValue* operand, GPRReg);
    void emitGetVarFromIndirectPointer(JSValue** operand, GPRReg);
#else
    void emitGetVarFromIndirectPointer(JSValue** operand, GPRReg tag, GPRReg payload);
    void emitGetVarFromPointer(JSValue* operand, GPRReg tag, GPRReg payload);
#endif
    void emitGetClosureVar(int scope, uintptr_t operand);
    void emitNotifyWrite(WatchpointSet*);
    void emitNotifyWrite(GPRReg pointerToSet);
    void emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet*);
    void emitPutGlobalVariableIndirect(JSValue** addressOfOperand, int value, WatchpointSet**);
    void emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet*);

    void emitInitRegister(int dst);

    void emitPutIntToCallFrameHeader(RegisterID from, int entry);

    JSValue getConstantOperand(int src);
    bool isOperandConstantInt(int src);
    bool isOperandConstantChar(int src);

    template <typename Op, typename Generator, typename ProfiledFunction, typename NonProfiledFunction>
    void emitMathICFast(JITUnaryMathIC<Generator>*, const Instruction*, ProfiledFunction, NonProfiledFunction);
    template <typename Op, typename Generator, typename ProfiledFunction, typename NonProfiledFunction>
    void emitMathICFast(JITBinaryMathIC<Generator>*, const Instruction*, ProfiledFunction, NonProfiledFunction);

    template <typename Op, typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>
    void emitMathICSlow(JITBinaryMathIC<Generator>*, const Instruction*, ProfiledRepatchFunction, ProfiledFunction, RepatchFunction);
    template <typename Op, typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>
    void emitMathICSlow(JITUnaryMathIC<Generator>*, const Instruction*, ProfiledRepatchFunction, ProfiledFunction, RepatchFunction);

    Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
    {
        return iter++->from;
    }
    void linkSlowCase(Vector<SlowCaseEntry>::iterator& iter)
    {
        if (iter->from.isSet())
            iter->from.link(this);
        ++iter;
    }
    void linkDummySlowCase(Vector<SlowCaseEntry>::iterator& iter)
    {
        ASSERT(!iter->from.isSet());
        ++iter;
    }
    void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int virtualRegisterIndex);
    void linkAllSlowCasesForBytecodeIndex(Vector<SlowCaseEntry>& slowCases,
        Vector<SlowCaseEntry>::iterator&, BytecodeIndex bytecodeIndex);

    void linkAllSlowCases(Vector<SlowCaseEntry>::iterator& iter)
    {
        linkAllSlowCasesForBytecodeIndex(m_slowCases, iter, m_bytecodeIndex);
    }

    MacroAssembler::Call appendCallWithExceptionCheck(const FunctionPtr<CFunctionPtrTag>);
#if OS(WINDOWS) && CPU(X86_64)
    MacroAssembler::Call appendCallWithExceptionCheckAndSlowPathReturnType(const FunctionPtr<CFunctionPtrTag>);
#endif
    MacroAssembler::Call appendCallWithCallFrameRollbackOnException(const FunctionPtr<CFunctionPtrTag>);
    MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResult(const FunctionPtr<CFunctionPtrTag>, int);
    template<typename Metadata>
    MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResultWithProfile(Metadata&, const FunctionPtr<CFunctionPtrTag>, int);

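    // Each callOperation() variant marshals its arguments into place for the C
    // calling convention via setupArguments, emits the call, and, except for
    // the NoExceptionCheck variant, appends an exception check.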
    template<typename OperationType, typename... Args>
    std::enable_if_t<FunctionTraits<OperationType>::hasResult, MacroAssembler::Call>
    callOperation(OperationType operation, int result, Args... args)
    {
        setupArguments<OperationType>(args...);
        return appendCallWithExceptionCheckSetJSValueResult(operation, result);
    }

#if OS(WINDOWS) && CPU(X86_64)
    template<typename OperationType, typename... Args>
    std::enable_if_t<std::is_same<typename FunctionTraits<OperationType>::ResultType, SlowPathReturnType>::value, MacroAssembler::Call>
    callOperation(OperationType operation, Args... args)
    {
        setupArguments<OperationType>(args...);
        return appendCallWithExceptionCheckAndSlowPathReturnType(operation);
    }

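    // The Win64 ABI returns aggregates wider than 64 bits through a hidden
    // pointer parameter, which the plain call sequence here does not set up, so
    // the generic overload below is restricted to results that fit in 64 bits.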
    template<typename Type>
    struct is64BitType {
        static constexpr bool value = sizeof(Type) <= 8;
    };

    template<>
    struct is64BitType<void> {
        static constexpr bool value = true;
    };

    template<typename OperationType, typename... Args>
    std::enable_if_t<!std::is_same<typename FunctionTraits<OperationType>::ResultType, SlowPathReturnType>::value, MacroAssembler::Call>
    callOperation(OperationType operation, Args... args)
    {
        static_assert(is64BitType<typename FunctionTraits<OperationType>::ResultType>::value, "Win64 cannot use standard call when return type is larger than 64 bits.");
        setupArguments<OperationType>(args...);
        return appendCallWithExceptionCheck(operation);
    }
#else // OS(WINDOWS) && CPU(X86_64)
    template<typename OperationType, typename... Args>
    MacroAssembler::Call callOperation(OperationType operation, Args... args)
    {
        setupArguments<OperationType>(args...);
        return appendCallWithExceptionCheck(operation);
    }
#endif // OS(WINDOWS) && CPU(X86_64)

    template<typename Metadata, typename OperationType, typename... Args>
    std::enable_if_t<FunctionTraits<OperationType>::hasResult, MacroAssembler::Call>
    callOperationWithProfile(Metadata& metadata, OperationType operation, int result, Args... args)
    {
        setupArguments<OperationType>(args...);
        return appendCallWithExceptionCheckSetJSValueResultWithProfile(metadata, operation, result);
    }

    template<typename OperationType, typename... Args>
    MacroAssembler::Call callOperationWithResult(OperationType operation, JSValueRegs resultRegs, Args... args)
    {
        setupArguments<OperationType>(args...);
        auto result = appendCallWithExceptionCheck(operation);
        setupResults(resultRegs);
        return result;
    }

    template<typename OperationType, typename... Args>
    MacroAssembler::Call callOperationNoExceptionCheck(OperationType operation, Args... args)
    {
        setupArguments<OperationType>(args...);
        updateTopCallFrame();
        return appendCall(operation);
    }

    template<typename OperationType, typename... Args>
    MacroAssembler::Call callOperationWithCallFrameRollbackOnException(OperationType operation, Args... args)
    {
        setupArguments<OperationType>(args...);
        return appendCallWithCallFrameRollbackOnException(operation);
    }

    enum class ProfilingPolicy {
        ShouldEmitProfiling,
        NoProfiling
    };

    template<typename Op, typename SnippetGenerator>
    void emitBitBinaryOpFastPath(const Instruction* currentInstruction, ProfilingPolicy shouldEmitProfiling = ProfilingPolicy::NoProfiling);

    void emitRightShiftFastPath(const Instruction* currentInstruction, OpcodeID);

    template<typename Op>
    void emitRightShiftFastPath(const Instruction* currentInstruction, JITRightShiftGenerator::ShiftType);

    void updateTopCallFrame();

    Call emitNakedCall(CodePtr<NoPtrTag> function = CodePtr<NoPtrTag>());
    Call emitNakedTailCall(CodePtr<NoPtrTag> function = CodePtr<NoPtrTag>());

    // Loads the character value of a single character string into dst.
    void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures);

    int jumpTarget(const Instruction*, int target);

#if ENABLE(DFG_JIT)
    void emitEnterOptimizationCheck();
#else
    void emitEnterOptimizationCheck() { }
#endif

#ifndef NDEBUG
    void printBytecodeOperandTypes(int src1, int src2);
#endif

#if ENABLE(SAMPLING_FLAGS)
    void setSamplingFlag(int32_t);
    void clearSamplingFlag(int32_t);
#endif

#if ENABLE(SAMPLING_COUNTERS)
    void emitCount(AbstractSamplingCounter&, int32_t = 1);
#endif

#if ENABLE(OPCODE_SAMPLING)
    void sampleInstruction(const Instruction*, bool = false);
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
    void sampleCodeBlock(CodeBlock*);
#else
    void sampleCodeBlock(CodeBlock*) { }
#endif

#if ENABLE(DFG_JIT)
    bool canBeOptimized() { return m_canBeOptimized; }
    bool canBeOptimizedOrInlined() { return m_canBeOptimizedOrInlined; }
    bool shouldEmitProfiling() { return m_shouldEmitProfiling; }
#else
    bool canBeOptimized() { return false; }
    bool canBeOptimizedOrInlined() { return false; }
    // Without the DFG there is no tier to consume profiling data, so value
    // profiling sites are never emitted.
    bool shouldEmitProfiling() { return false; }
#endif

    static bool reportCompileTimes();
    static bool computeCompileTimes();

    // If you need to check a value from the metadata table and you need it to
    // be consistent across the fast and slow path, then you want to use this.
    // It will give the slow path the same value read by the fast path.
    GetPutInfo copiedGetPutInfo(OpPutToScope);
    template<typename BinaryOp>
    BinaryArithProfile copiedArithProfile(BinaryOp);
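    // The copies are cached in m_copiedGetPutInfos and m_copiedArithProfiles so
    // the slow path re-reads the value captured on the fast path.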

    Interpreter* m_interpreter;

    Vector<CallRecord> m_calls;
    Vector<Label> m_labels;
    Vector<JITGetByIdGenerator> m_getByIds;
    Vector<JITGetByValGenerator> m_getByVals;
    Vector<JITGetByIdWithThisGenerator> m_getByIdsWithThis;
    Vector<JITPutByIdGenerator> m_putByIds;
    Vector<JITInByIdGenerator> m_inByIds;
    Vector<JITInstanceOfGenerator> m_instanceOfs;
    Vector<ByValCompilationInfo> m_byValCompilationInfo;
    Vector<CallCompilationInfo> m_callCompilationInfo;
    Vector<JumpTable> m_jmpTable;

    BytecodeIndex m_bytecodeIndex;
    Vector<SlowCaseEntry> m_slowCases;
    Vector<SwitchRecord> m_switches;

    HashMap<unsigned, unsigned> m_copiedGetPutInfos;
    HashMap<uint64_t, BinaryArithProfile> m_copiedArithProfiles;

    JumpList m_exceptionChecks;
    JumpList m_exceptionChecksWithCallFrameRollback;
    Label m_exceptionHandler;

    unsigned m_getByIdIndex { UINT_MAX };
    unsigned m_getByValIndex { UINT_MAX };
    unsigned m_getByIdWithThisIndex { UINT_MAX };
    unsigned m_putByIdIndex { UINT_MAX };
    unsigned m_inByIdIndex { UINT_MAX };
    unsigned m_instanceOfIndex { UINT_MAX };
    unsigned m_byValInstructionIndex { UINT_MAX };
    unsigned m_callLinkInfoIndex { UINT_MAX };

    Label m_arityCheck;
    std::unique_ptr<LinkBuffer> m_linkBuffer;

    std::unique_ptr<JITDisassembler> m_disassembler;
    RefPtr<Profiler::Compilation> m_compilation;

    PCToCodeOriginMapBuilder m_pcToCodeOriginMapBuilder;

    HashMap<const Instruction*, void*> m_instructionToMathIC;
    HashMap<const Instruction*, MathICGenerationState> m_instructionToMathICGenerationState;

    bool m_canBeOptimized;
    bool m_canBeOptimizedOrInlined;
    bool m_shouldEmitProfiling;
    BytecodeIndex m_loopOSREntryBytecodeIndex;
};

} // namespace JSC

#endif // ENABLE(JIT)