/*
 * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "LowLevelInterpreter.h"

#include "LLIntOfflineAsmConfig.h"
#include <wtf/InlineASM.h>

#if ENABLE(C_LOOP)
#include "Bytecodes.h"
#include "CLoopStackInlines.h"
#include "CodeBlock.h"
#include "CommonSlowPaths.h"
#include "Interpreter.h"
#include "JSCInlines.h"
#include "LLIntCLoop.h"
#include "LLIntData.h"
#include "LLIntSlowPaths.h"
#include <wtf/Assertions.h>
#include <wtf/MathExtras.h>

using namespace JSC::LLInt;

// LLInt C Loop opcodes
// ====================
// In the implementation of the C loop, the LLInt trampoline glue functions
// (e.g. llint_program_prologue, llint_eval_prologue, etc.) are addressed as
// if they were bytecode handlers. That means the names of the trampoline
// functions will be added to the OpcodeID list via the
// FOR_EACH_LLINT_OPCODE_EXTENSION() macro that FOR_EACH_OPCODE_ID()
// includes.
//
// In addition, some JIT trampoline functions which are needed by LLInt
// (e.g. getHostCallReturnValue, ctiOpThrowNotCaught) are also added as
// bytecodes, and the CLoop will provide bytecode handlers for them.
//
// In the CLoop, we can only dispatch indirectly to these bytecodes
// (including the LLInt and JIT extensions). All other dispatches
// (i.e. goto's) must be to a known label (i.e. local / global labels).


// How are the opcodes named?
// ==========================
// Here is a table showing examples of how each manifestation of the
// opcodes is named:
//
//   Type:                        Opcode            Trampoline Glue
//                                ======            ===============
//   [In the llint .asm files]
//   llint labels:                llint_op_enter    llint_program_prologue
//
//   OpcodeID:                    op_enter          llint_program
//                                [in Opcode.h]     [in LLIntOpcode.h]
//
//   When using a switch statement dispatch in the CLoop, each "opcode" is
//   a case statement:
//   Opcode:                      case op_enter:    case llint_program_prologue:
//
//   When using a computed goto dispatch in the CLoop, each opcode is a label:
//   Opcode:                      op_enter:         llint_program_prologue:


//============================================================================
// Define the opcode dispatch mechanism when using the C loop:
//

// These are for building a C Loop interpreter:
#define OFFLINE_ASM_BEGIN
#define OFFLINE_ASM_END

#if ENABLE(OPCODE_TRACING)
#define TRACE_OPCODE(opcode) dataLogF(" op %s\n", #opcode)
#else
#define TRACE_OPCODE(opcode)
#endif

// To keep compilers happy in case of unused labels, force usage of the label:
#define USE_LABEL(label) \
    do { \
        if (false) \
            goto label; \
    } while (false)
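
// For illustration, USE_LABEL(op_enter) expands to
//     do { if (false) goto op_enter; } while (false)
// i.e. the label is referenced but never jumped to, which suppresses
// "unused label" warnings without affecting control flow.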

#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode) USE_LABEL(opcode); TRACE_OPCODE(opcode);

#define OFFLINE_ASM_GLOBAL_LABEL(label) label: USE_LABEL(label);

#if ENABLE(LABEL_TRACING)
#define TRACE_LABEL(prefix, label) dataLog(#prefix, ": ", #label, "\n")
#else
#define TRACE_LABEL(prefix, label) do { } while (false)
#endif


#if ENABLE(COMPUTED_GOTO_OPCODES)
#define OFFLINE_ASM_GLUE_LABEL(label) label: TRACE_LABEL("OFFLINE_ASM_GLUE_LABEL", label); USE_LABEL(label);
#else
#define OFFLINE_ASM_GLUE_LABEL(label) case label: label: USE_LABEL(label);
#endif
#define OFFLINE_ASM_LOCAL_LABEL(label) label: TRACE_LABEL("OFFLINE_ASM_LOCAL_LABEL", label); USE_LABEL(label);
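
// For illustration, in a computed goto build,
//     OFFLINE_ASM_GLUE_LABEL(llint_program_prologue)
// expands to roughly:
//     llint_program_prologue: TRACE_LABEL(...); USE_LABEL(llint_program_prologue);
// whereas the switch-based build also emits "case llint_program_prologue:"
// so that the glue label is reachable from the dispatch switch in the C loop.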

namespace JSC {

//============================================================================
// CLoopRegister is the storage for an emulated CPU register.
// It defines the policy for how ints smaller than intptr_t are packed into
// the pseudo register, and it hides endianness differences.

class CLoopRegister {
public:
    ALWAYS_INLINE intptr_t i() const { return m_value; }
    ALWAYS_INLINE uintptr_t u() const { return m_value; }
    ALWAYS_INLINE int32_t i32() const { return m_value; }
    ALWAYS_INLINE uint32_t u32() const { return m_value; }
    ALWAYS_INLINE int8_t i8() const { return m_value; }
    ALWAYS_INLINE uint8_t u8() const { return m_value; }

    ALWAYS_INLINE intptr_t* ip() const { return bitwise_cast<intptr_t*>(m_value); }
    ALWAYS_INLINE int8_t* i8p() const { return bitwise_cast<int8_t*>(m_value); }
    ALWAYS_INLINE void* vp() const { return bitwise_cast<void*>(m_value); }
    ALWAYS_INLINE const void* cvp() const { return bitwise_cast<const void*>(m_value); }
    ALWAYS_INLINE CallFrame* callFrame() const { return bitwise_cast<CallFrame*>(m_value); }
    ALWAYS_INLINE ExecState* execState() const { return bitwise_cast<ExecState*>(m_value); }
    ALWAYS_INLINE const void* instruction() const { return bitwise_cast<const void*>(m_value); }
    ALWAYS_INLINE VM* vm() const { return bitwise_cast<VM*>(m_value); }
    ALWAYS_INLINE JSCell* cell() const { return bitwise_cast<JSCell*>(m_value); }
    ALWAYS_INLINE ProtoCallFrame* protoCallFrame() const { return bitwise_cast<ProtoCallFrame*>(m_value); }
    ALWAYS_INLINE NativeFunction nativeFunc() const { return bitwise_cast<NativeFunction>(m_value); }
#if USE(JSVALUE64)
    ALWAYS_INLINE int64_t i64() const { return m_value; }
    ALWAYS_INLINE uint64_t u64() const { return m_value; }
    ALWAYS_INLINE EncodedJSValue encodedJSValue() const { return bitwise_cast<EncodedJSValue>(m_value); }
#endif
    ALWAYS_INLINE Opcode opcode() const { return bitwise_cast<Opcode>(m_value); }

    operator ExecState*() { return bitwise_cast<ExecState*>(m_value); }
    operator const Instruction*() { return bitwise_cast<const Instruction*>(m_value); }
    operator JSCell*() { return bitwise_cast<JSCell*>(m_value); }
    operator ProtoCallFrame*() { return bitwise_cast<ProtoCallFrame*>(m_value); }
    operator Register*() { return bitwise_cast<Register*>(m_value); }
    operator VM*() { return bitwise_cast<VM*>(m_value); }

    template<typename T, typename = std::enable_if_t<sizeof(T) == sizeof(uintptr_t)>>
    ALWAYS_INLINE void operator=(T value) { m_value = bitwise_cast<uintptr_t>(value); }
#if USE(JSVALUE64)
    ALWAYS_INLINE void operator=(int32_t value) { m_value = static_cast<intptr_t>(value); }
    ALWAYS_INLINE void operator=(uint32_t value) { m_value = static_cast<uintptr_t>(value); }
#endif
    ALWAYS_INLINE void operator=(int16_t value) { m_value = static_cast<intptr_t>(value); }
    ALWAYS_INLINE void operator=(uint16_t value) { m_value = static_cast<uintptr_t>(value); }
    ALWAYS_INLINE void operator=(int8_t value) { m_value = static_cast<intptr_t>(value); }
    ALWAYS_INLINE void operator=(uint8_t value) { m_value = static_cast<uintptr_t>(value); }
    ALWAYS_INLINE void operator=(bool value) { m_value = static_cast<uintptr_t>(value); }

#if USE(JSVALUE64)
    ALWAYS_INLINE double bitsAsDouble() const { return bitwise_cast<double>(m_value); }
    ALWAYS_INLINE int64_t bitsAsInt64() const { return bitwise_cast<int64_t>(m_value); }
#endif

private:
    uintptr_t m_value { static_cast<uintptr_t>(0xbadbeef0baddbeef) };
};
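
// A usage sketch (illustrative only, not code from the interpreter): a
// CLoopRegister acts like a machine register that can be read at various
// widths and reinterpreted as a pointer:
//
//     CLoopRegister r;
//     r = static_cast<int32_t>(-1); // stored sign-extended to register width
//     intptr_t asInt = r.i();       // read back as a pointer-sized int (-1)
//     void* asPtr = r.vp();         // or reinterpreted as a pointer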

class CLoopDoubleRegister {
public:
    template<typename T>
    explicit operator T() const { return bitwise_cast<T>(m_value); }

    ALWAYS_INLINE double d() const { return m_value; }
    ALWAYS_INLINE int64_t bitsAsInt64() const { return bitwise_cast<int64_t>(m_value); }

    ALWAYS_INLINE void operator=(double value) { m_value = value; }

    template<typename T, typename = std::enable_if_t<sizeof(T) == sizeof(uintptr_t) && std::is_integral<T>::value>>
    ALWAYS_INLINE void operator=(T value) { m_value = bitwise_cast<double>(value); }

private:
    double m_value;
};

//============================================================================
// Some utilities:
//

namespace LLInt {

#if USE(JSVALUE32_64)
static double ints2Double(uint32_t lo, uint32_t hi)
{
    uint64_t value = (static_cast<uint64_t>(hi) << 32) | lo;
    return bitwise_cast<double>(value);
}

static void double2Ints(double val, CLoopRegister& lo, CLoopRegister& hi)
{
    uint64_t value = bitwise_cast<uint64_t>(val);
    hi = static_cast<uint32_t>(value >> 32);
    lo = static_cast<uint32_t>(value);
}
#endif // USE(JSVALUE32_64)
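
// Worked example: bitwise_cast<uint64_t>(1.0) is 0x3FF0000000000000, so
// double2Ints(1.0, lo, hi) yields hi = 0x3FF00000 and lo = 0, and
// ints2Double(0, 0x3FF00000) reconstructs 1.0.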

static void decodeResult(SlowPathReturnType result, CLoopRegister& t0, CLoopRegister& t1)
{
    const void* t0Result;
    const void* t1Result;
    JSC::decodeResult(result, t0Result, t1Result);
    t0 = t0Result;
    t1 = t1Result;
}

} // namespace LLInt

//============================================================================
// The llint C++ interpreter loop:
//

JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame, bool isInitializationPass)
{
#define CAST bitwise_cast

    // One-time initialization of our address tables. We have to put this code
    // here because our labels are only in scope inside this function. The
    // caller (or one of its ancestors) is responsible for ensuring that this
    // is only called once during the initialization of the VM before threads
    // are at play.
    if (UNLIKELY(isInitializationPass)) {
        Opcode* opcodeMap = LLInt::opcodeMap();
        Opcode* opcodeMapWide16 = LLInt::opcodeMapWide16();
        Opcode* opcodeMapWide32 = LLInt::opcodeMapWide32();

#if ENABLE(COMPUTED_GOTO_OPCODES)
#define OPCODE_ENTRY(__opcode, length) \
    opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode); \
    opcodeMapWide16[__opcode] = bitwise_cast<void*>(&&__opcode##_wide16); \
    opcodeMapWide32[__opcode] = bitwise_cast<void*>(&&__opcode##_wide32);

#define LLINT_OPCODE_ENTRY(__opcode, length) \
    opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
#else
// FIXME: this mapping is unnecessarily expensive in the absence of COMPUTED_GOTO:
// narrow opcodes don't need any mapping, and wide opcodes just need to add numOpcodeIDs.
#define OPCODE_ENTRY(__opcode, length) \
    opcodeMap[__opcode] = __opcode; \
    opcodeMapWide16[__opcode] = static_cast<OpcodeID>(__opcode##_wide16); \
    opcodeMapWide32[__opcode] = static_cast<OpcodeID>(__opcode##_wide32);

#define LLINT_OPCODE_ENTRY(__opcode, length) \
    opcodeMap[__opcode] = __opcode;
#endif
        FOR_EACH_BYTECODE_ID(OPCODE_ENTRY)
        FOR_EACH_CLOOP_BYTECODE_HELPER_ID(LLINT_OPCODE_ENTRY)
        FOR_EACH_LLINT_NATIVE_HELPER(LLINT_OPCODE_ENTRY)
#undef OPCODE_ENTRY
#undef LLINT_OPCODE_ENTRY
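
        // For illustration, with computed gotos, OPCODE_ENTRY(op_enter, length)
        // expands to:
        //     opcodeMap[op_enter] = bitwise_cast<void*>(&&op_enter);
        //     opcodeMapWide16[op_enter] = bitwise_cast<void*>(&&op_enter_wide16);
        //     opcodeMapWide32[op_enter] = bitwise_cast<void*>(&&op_enter_wide32);
        // i.e. each OpcodeID indexes the address of its handler label below.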

        // Note: we can only set the exceptionInstructions after we have
        // initialized the opcodeMap above. This is because getCodePtr()
        // can depend on the opcodeMap.
        uint8_t* exceptionInstructions = reinterpret_cast<uint8_t*>(LLInt::exceptionInstructions());
        for (int i = 0; i < maxOpcodeLength + 1; ++i)
            exceptionInstructions[i] = llint_throw_from_slow_path_trampoline;

        return JSValue();
    }

    // Define the pseudo registers used by the LLINT C Loop backend:
    static_assert(sizeof(CLoopRegister) == sizeof(intptr_t));

    // The CLoop llint backend is initially based on the ARMv7 backend, and
    // then further enhanced with a few instructions from the x86 backend to
    // support building for X64 targets. Hence, the shape of the generated
    // code and the usage convention of registers will look a lot like the
    // ARMv7 backend's.
    //
    // For example, on a 32-bit build:
    // 1. Outgoing args will be set up as follows:
    //    arg1 in t0 (r0 on ARM)
    //    arg2 in t1 (r1 on ARM)
    // 2. 32 bit return values will be in t0 (r0 on ARM).
    // 3. 64 bit return values (e.g. doubles) will be in t0,t1 (r0,r1 on ARM).
    //
    // But instead of naming these simulator registers based on their ARM
    // counterparts, we'll name them based on their original llint asm names.
    // This will make it easier to correlate the generated code with the
    // original llint asm code.
    //
    // On a 64-bit build, it is more like x64 in that the registers are
    // 64 bits wide. Hence:
    // 1. Outgoing args are still the same: arg1 in t0, arg2 in t1, etc.
    // 2. 32 bit result values will be in the low 32 bits of t0.
    // 3. 64 bit result values will be in t0.

    CLoopRegister t0, t1, t2, t3, t5, sp, cfr, lr, pc;
#if USE(JSVALUE64)
    CLoopRegister pcBase, tagTypeNumber, tagMask;
#endif
    CLoopRegister metadataTable;
    CLoopDoubleRegister d0, d1;

    struct StackPointerScope {
        StackPointerScope(CLoopStack& stack)
            : m_stack(stack)
            , m_originalStackPointer(stack.currentStackPointer())
        { }

        ~StackPointerScope()
        {
            m_stack.setCurrentStackPointer(m_originalStackPointer);
        }

    private:
        CLoopStack& m_stack;
        void* m_originalStackPointer;
    };
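
    // StackPointerScope is a RAII guard: every return path out of this
    // function, including the returns from the glue labels below, restores
    // the CLoop stack pointer automatically.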

    CLoopStack& cloopStack = vm->interpreter->cloopStack();
    StackPointerScope stackPointerScope(cloopStack);

    lr = getOpcode(llint_return_to_host);
    sp = cloopStack.currentStackPointer();
    cfr = vm->topCallFrame;
#ifndef NDEBUG
    void* startSP = sp.vp();
    CallFrame* startCFR = cfr.callFrame();
#endif

    // Initialize the incoming args for doVMEntryToJavaScript:
    t0 = executableAddress;
    t1 = vm;
    t2 = protoCallFrame;

#if USE(JSVALUE64)
    // For the ASM llint, JITStubs takes care of this initialization. We do
    // it explicitly here for the C loop:
    tagTypeNumber = 0xFFFF000000000000;
    tagMask = 0xFFFF000000000002;
#endif // USE(JSVALUE64)

    // Interpreter variables for value passing between opcodes and/or helpers:
    NativeFunction nativeFunc = nullptr;
    JSValue functionReturnValue;
    Opcode opcode = getOpcode(entryOpcodeID);

#define PUSH(cloopReg) \
    do { \
        sp = sp.ip() - 1; \
        *sp.ip() = cloopReg.i(); \
    } while (false)

#define POP(cloopReg) \
    do { \
        cloopReg = *sp.ip(); \
        sp = sp.ip() + 1; \
    } while (false)
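
// PUSH and POP emulate a descending stack: PUSH(t0) decrements sp by one
// word and stores t0.i() at the new top; POP(t0) reloads the top word into
// t0 and increments sp back.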

#if ENABLE(OPCODE_STATS)
#define RECORD_OPCODE_STATS(__opcode) OpcodeStats::recordInstruction(__opcode)
#else
#define RECORD_OPCODE_STATS(__opcode)
#endif

#if USE(JSVALUE32_64)
#define FETCH_OPCODE() *pc.i8p()
#else // USE(JSVALUE64)
#define FETCH_OPCODE() *bitwise_cast<OpcodeID*>(pcBase.i8p() + pc.i())
#endif // USE(JSVALUE64)

#define NEXT_INSTRUCTION() \
    do { \
        opcode = FETCH_OPCODE(); \
        DISPATCH_OPCODE(); \
    } while (false)
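
// For illustration, with computed gotos, NEXT_INSTRUCTION() expands to
// roughly:
//     opcode = FETCH_OPCODE();
//     goto *opcode;
// whereas the switch-based build instead jumps back to the dispatchOpcode
// label and re-enters the switch below.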

#if ENABLE(COMPUTED_GOTO_OPCODES)

    //========================================================================
    // Loop dispatch mechanism using computed goto statements:

#define DISPATCH_OPCODE() goto *opcode

#define DEFINE_OPCODE(__opcode) \
    __opcode: \
        RECORD_OPCODE_STATS(__opcode);

    // Dispatch to the current PC's bytecode:
    DISPATCH_OPCODE();

#else // !ENABLE(COMPUTED_GOTO_OPCODES)
    //========================================================================
    // Loop dispatch mechanism using a C switch statement:

#define DISPATCH_OPCODE() goto dispatchOpcode

#define DEFINE_OPCODE(__opcode) \
    case __opcode: \
    __opcode: \
        RECORD_OPCODE_STATS(__opcode);

    // Dispatch to the current PC's bytecode:
    dispatchOpcode:
    switch (static_cast<unsigned>(opcode))

#endif // !ENABLE(COMPUTED_GOTO_OPCODES)

    //========================================================================
    // Bytecode handlers:
    {
        // This is the file generated by offlineasm, which contains all of the
        // bytecode handlers for the interpreter, as compiled from
        // LowLevelInterpreter.asm and its peers.

        IGNORE_CLANG_WARNINGS_BEGIN("unreachable-code")
        #include "LLIntAssembly.h"
        IGNORE_CLANG_WARNINGS_END

        OFFLINE_ASM_GLUE_LABEL(llint_return_to_host)
        {
            ASSERT(startSP == sp.vp());
            ASSERT(startCFR == cfr.callFrame());
#if USE(JSVALUE32_64)
            return JSValue(t1.i(), t0.i()); // returning JSValue(tag, payload);
#else
            return JSValue::decode(t0.encodedJSValue());
#endif
        }

        // In the ASM llint, getHostCallReturnValue() is a piece of glue
        // code provided by the JIT (see jit/JITOperations.cpp).
        // We simulate it here with a pseudo-opcode handler.
        OFFLINE_ASM_GLUE_LABEL(getHostCallReturnValue)
        {
            // The part in getHostCallReturnValueWithExecState():
            JSValue result = vm->hostCallReturnValue;
#if USE(JSVALUE32_64)
            t1 = result.tag();
            t0 = result.payload();
#else
            t0 = JSValue::encode(result);
#endif
            opcode = lr.opcode();
            DISPATCH_OPCODE();
        }

#if !ENABLE(COMPUTED_GOTO_OPCODES)
    default:
        ASSERT(false);
#endif

    } // END bytecode handler cases.

#if ENABLE(COMPUTED_GOTO_OPCODES)
    // Keep the compiler happy so that it doesn't complain about unused
    // labels for the LLInt trampoline glue. The labels are automatically
    // emitted by the label macros above, and some of them are referenced by
    // the llint generated code. Since we can't tell ahead of time which
    // will be referenced and which will not, we'll just pacify the
    // compiler on all such labels:
#define LLINT_OPCODE_ENTRY(__opcode, length) \
    UNUSED_LABEL(__opcode);
    FOR_EACH_OPCODE_ID(LLINT_OPCODE_ENTRY);
#undef LLINT_OPCODE_ENTRY
#endif

#undef NEXT_INSTRUCTION
#undef DEFINE_OPCODE
#undef CHECK_FOR_TIMEOUT
#undef CAST

    return JSValue(); // to suppress a compiler warning.
} // CLoop::execute()

} // namespace JSC

#elif !COMPILER(MSVC)

//============================================================================
// Define the opcode dispatch mechanism when using an ASM loop:
//

// These are for building an interpreter from generated assembly code:
#define OFFLINE_ASM_BEGIN asm (
#define OFFLINE_ASM_END );

#if USE(LLINT_EMBEDDED_OPCODE_ID)
#define EMBED_OPCODE_ID_IF_NEEDED(__opcode) ".int " __opcode##_value_string "\n"
#else
#define EMBED_OPCODE_ID_IF_NEEDED(__opcode)
#endif
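
// When enabled, EMBED_OPCODE_ID_IF_NEEDED plants the opcode's numeric ID as
// an integer immediately before its handler label, so that the ID can later
// be recovered from a handler address.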

#define OFFLINE_ASM_OPCODE_LABEL(__opcode) \
    EMBED_OPCODE_ID_IF_NEEDED(__opcode) \
    OFFLINE_ASM_OPCODE_DEBUG_LABEL(llint_##__opcode) \
    OFFLINE_ASM_LOCAL_LABEL(llint_##__opcode)

#define OFFLINE_ASM_GLUE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(__opcode)

#if CPU(ARM_THUMB2)
#define OFFLINE_ASM_GLOBAL_LABEL(label) \
    ".text\n" \
    ".align 4\n" \
    ".globl " SYMBOL_STRING(label) "\n" \
    HIDE_SYMBOL(label) "\n" \
    ".thumb\n" \
    ".thumb_func " THUMB_FUNC_PARAM(label) "\n" \
    SYMBOL_STRING(label) ":\n"
#elif CPU(ARM64)
#define OFFLINE_ASM_GLOBAL_LABEL(label) \
    ".text\n" \
    ".align 4\n" \
    ".globl " SYMBOL_STRING(label) "\n" \
    HIDE_SYMBOL(label) "\n" \
    SYMBOL_STRING(label) ":\n"
#else
#define OFFLINE_ASM_GLOBAL_LABEL(label) \
    ".text\n" \
    ".globl " SYMBOL_STRING(label) "\n" \
    HIDE_SYMBOL(label) "\n" \
    SYMBOL_STRING(label) ":\n"
#endif

#define OFFLINE_ASM_LOCAL_LABEL(label) LOCAL_LABEL_STRING(label) ":\n"

#if OS(LINUX)
#define OFFLINE_ASM_OPCODE_DEBUG_LABEL(label) #label ":\n"
#else
#define OFFLINE_ASM_OPCODE_DEBUG_LABEL(label)
#endif
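
// For illustration (using vmEntryToJavaScript, a global label defined in the
// .asm source, as an example), on a non-ARM target
//     OFFLINE_ASM_GLOBAL_LABEL(vmEntryToJavaScript)
// contributes roughly the following to the inline asm:
//     ".text\n"
//     ".globl " SYMBOL_STRING(vmEntryToJavaScript) "\n"
//     HIDE_SYMBOL(vmEntryToJavaScript) "\n"
//     SYMBOL_STRING(vmEntryToJavaScript) ":\n"
// i.e. a named assembly entry point that the C++ side can call.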

// This is a file generated by offlineasm, which contains all of the assembly code
// for the interpreter, as compiled from LowLevelInterpreter.asm.
#include "LLIntAssembly.h"

#endif // ENABLE(C_LOOP)