/*
 * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "LowLevelInterpreter.h"

#include "LLIntOfflineAsmConfig.h"
#include <wtf/InlineASM.h>

#if ENABLE(C_LOOP)
#include "Bytecodes.h"
#include "CLoopStackInlines.h"
#include "CodeBlock.h"
#include "CommonSlowPaths.h"
#include "Interpreter.h"
#include "JSCInlines.h"
#include "LLIntCLoop.h"
#include "LLIntData.h"
#include "LLIntSlowPaths.h"
#include <wtf/Assertions.h>
#include <wtf/MathExtras.h>

using namespace JSC::LLInt;

// LLInt C Loop opcodes
// ====================
// In the implementation of the C loop, the LLInt trampoline glue functions
// (e.g. llint_program_prologue, llint_eval_prologue, etc.) are addressed as
// if they are bytecode handlers. That means the names of the trampoline
// functions will be added to the OpcodeID list via the
// FOR_EACH_LLINT_OPCODE_EXTENSION() macro that FOR_EACH_OPCODE_ID()
// includes.
//
// In addition, some JIT trampoline functions which are needed by the LLInt
// (e.g. getHostCallReturnValue, ctiOpThrowNotCaught) are also added as
// bytecodes, and the CLoop will provide bytecode handlers for them.
//
// In the CLoop, we can only dispatch indirectly to these bytecodes
// (including the LLInt and JIT extensions). All other dispatches
// (i.e. gotos) must be to known labels (i.e. local / global labels).
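//
// For illustration, the extension list in LLIntOpcode.h looks roughly like
// this (a hypothetical sketch, not the exact entries):
//
//     #define FOR_EACH_LLINT_OPCODE_EXTENSION(macro) \
//         macro(llint_program_prologue, 1) \
//         macro(llint_eval_prologue, 1) \
//         ...
//
// Each trampoline name thereby becomes an OpcodeID just like a real bytecode.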


// How are the opcodes named?
// ==========================
// Here is a table showing examples of how each manifestation of the
// opcodes is named:
//
//   Type:                     Opcode             Trampoline Glue
//                             ======             ===============
//   [In the llint .asm files]
//   llint labels:             llint_op_enter     llint_program_prologue
//
//   OpcodeID:                 op_enter           llint_program
//                             [in Opcode.h]      [in LLIntOpcode.h]
//
// When using a switch statement dispatch in the CLoop, each "opcode" is
// a case statement:
//   Opcode:                   case op_enter:     case llint_program_prologue:
//
// When using a computed goto dispatch in the CLoop, each opcode is a label:
//   Opcode:                   op_enter:          llint_program_prologue:


//============================================================================
// Define the opcode dispatch mechanism when using the C loop:
//

// These are for building a C Loop interpreter:
#define OFFLINE_ASM_BEGIN
#define OFFLINE_ASM_END

#if ENABLE(OPCODE_TRACING)
#define TRACE_OPCODE(opcode) dataLogF(" op %s\n", #opcode)
#else
#define TRACE_OPCODE(opcode)
#endif

// To keep compilers happy in case of unused labels, force usage of the label:
#define USE_LABEL(label) \
    do { \
        if (false) \
            goto label; \
    } while (false)

#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode) USE_LABEL(opcode); TRACE_OPCODE(opcode);

#define OFFLINE_ASM_GLOBAL_LABEL(label) label: USE_LABEL(label);

#if ENABLE(LABEL_TRACING)
#define TRACE_LABEL(prefix, label) dataLog(#prefix, ": ", #label, "\n")
#else
#define TRACE_LABEL(prefix, label) do { } while (false)
#endif


#if ENABLE(COMPUTED_GOTO_OPCODES)
#define OFFLINE_ASM_GLUE_LABEL(label) label: TRACE_LABEL("OFFLINE_ASM_GLUE_LABEL", label); USE_LABEL(label);
#else
#define OFFLINE_ASM_GLUE_LABEL(label) case label: label: USE_LABEL(label);
#endif

#define OFFLINE_ASM_LOCAL_LABEL(label) label: TRACE_LABEL("OFFLINE_ASM_LOCAL_LABEL", #label); USE_LABEL(label);
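
// For illustration, with computed goto dispatch enabled,
// OFFLINE_ASM_OPCODE_LABEL(op_enter) expands roughly to (a sketch;
// DEFINE_OPCODE, defined below in CLoop::execute(), supplies the label and
// the stats recording):
//
//     op_enter:
//         RECORD_OPCODE_STATS(op_enter);
//         USE_LABEL(op_enter);
//         TRACE_OPCODE(op_enter);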

namespace JSC {

//============================================================================
// CLoopRegister is the storage for an emulated CPU register.
// It defines the policy of how ints smaller than intptr_t are packed into
// the pseudo register, and it hides endianness differences.

class CLoopRegister {
public:
    ALWAYS_INLINE intptr_t i() const { return m_value; }
    ALWAYS_INLINE uintptr_t u() const { return m_value; }
    ALWAYS_INLINE int32_t i32() const { return m_value; }
    ALWAYS_INLINE uint32_t u32() const { return m_value; }
    ALWAYS_INLINE int8_t i8() const { return m_value; }
    ALWAYS_INLINE uint8_t u8() const { return m_value; }

    ALWAYS_INLINE intptr_t* ip() const { return bitwise_cast<intptr_t*>(m_value); }
    ALWAYS_INLINE int8_t* i8p() const { return bitwise_cast<int8_t*>(m_value); }
    ALWAYS_INLINE void* vp() const { return bitwise_cast<void*>(m_value); }
    ALWAYS_INLINE const void* cvp() const { return bitwise_cast<const void*>(m_value); }
    ALWAYS_INLINE CallFrame* callFrame() const { return bitwise_cast<CallFrame*>(m_value); }
    ALWAYS_INLINE const void* instruction() const { return bitwise_cast<const void*>(m_value); }
    ALWAYS_INLINE VM* vm() const { return bitwise_cast<VM*>(m_value); }
    ALWAYS_INLINE JSCell* cell() const { return bitwise_cast<JSCell*>(m_value); }
    ALWAYS_INLINE ProtoCallFrame* protoCallFrame() const { return bitwise_cast<ProtoCallFrame*>(m_value); }
    ALWAYS_INLINE NativeFunction nativeFunc() const { return bitwise_cast<NativeFunction>(m_value); }
#if USE(JSVALUE64)
    ALWAYS_INLINE int64_t i64() const { return m_value; }
    ALWAYS_INLINE uint64_t u64() const { return m_value; }
    ALWAYS_INLINE EncodedJSValue encodedJSValue() const { return bitwise_cast<EncodedJSValue>(m_value); }
#endif
    ALWAYS_INLINE Opcode opcode() const { return bitwise_cast<Opcode>(m_value); }

    operator CallFrame*() { return bitwise_cast<CallFrame*>(m_value); }
    operator const Instruction*() { return bitwise_cast<const Instruction*>(m_value); }
    operator JSCell*() { return bitwise_cast<JSCell*>(m_value); }
    operator ProtoCallFrame*() { return bitwise_cast<ProtoCallFrame*>(m_value); }
    operator Register*() { return bitwise_cast<Register*>(m_value); }
    operator VM*() { return bitwise_cast<VM*>(m_value); }

    template<typename T, typename = std::enable_if_t<sizeof(T) == sizeof(uintptr_t)>>
    ALWAYS_INLINE void operator=(T value) { m_value = bitwise_cast<uintptr_t>(value); }
#if USE(JSVALUE64)
    ALWAYS_INLINE void operator=(int32_t value) { m_value = static_cast<intptr_t>(value); }
    ALWAYS_INLINE void operator=(uint32_t value) { m_value = static_cast<uintptr_t>(value); }
#endif
    ALWAYS_INLINE void operator=(int16_t value) { m_value = static_cast<intptr_t>(value); }
    ALWAYS_INLINE void operator=(uint16_t value) { m_value = static_cast<uintptr_t>(value); }
    ALWAYS_INLINE void operator=(int8_t value) { m_value = static_cast<intptr_t>(value); }
    ALWAYS_INLINE void operator=(uint8_t value) { m_value = static_cast<uintptr_t>(value); }
    ALWAYS_INLINE void operator=(bool value) { m_value = static_cast<uintptr_t>(value); }

#if USE(JSVALUE64)
    ALWAYS_INLINE double bitsAsDouble() const { return bitwise_cast<double>(m_value); }
    ALWAYS_INLINE int64_t bitsAsInt64() const { return bitwise_cast<int64_t>(m_value); }
#endif

private:
    uintptr_t m_value { static_cast<uintptr_t>(0xbadbeef0baddbeef) };
};
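
// For illustration (a hypothetical snippet, not code from this file): a
// handler can store through one view of the register and read through
// another, e.g.
//
//     CLoopRegister r;
//     r = someCallFrame;                // the pointer's raw bits are stored
//     CallFrame* frame = r.callFrame(); // read back as a CallFrame*
//     int32_t low = r.i32();            // or reinterpreted as a 32-bit int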

class CLoopDoubleRegister {
public:
    template<typename T>
    explicit operator T() const { return bitwise_cast<T>(m_value); }

    ALWAYS_INLINE double d() const { return m_value; }
    ALWAYS_INLINE int64_t bitsAsInt64() const { return bitwise_cast<int64_t>(m_value); }

    ALWAYS_INLINE void operator=(double value) { m_value = value; }

    template<typename T, typename = std::enable_if_t<sizeof(T) == sizeof(uintptr_t) && std::is_integral<T>::value>>
    ALWAYS_INLINE void operator=(T value) { m_value = bitwise_cast<double>(value); }

private:
    double m_value;
};

//============================================================================
// Some utilities:
//

namespace LLInt {

#if USE(JSVALUE32_64)
static double ints2Double(uint32_t lo, uint32_t hi)
{
    uint64_t value = (static_cast<uint64_t>(hi) << 32) | lo;
    return bitwise_cast<double>(value);
}

static void double2Ints(double val, CLoopRegister& lo, CLoopRegister& hi)
{
    uint64_t value = bitwise_cast<uint64_t>(val);
    hi = static_cast<uint32_t>(value >> 32);
    lo = static_cast<uint32_t>(value);
}
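
// For illustration: 1.0 has the IEEE-754 bit pattern 0x3ff0000000000000, so
// double2Ints(1.0, lo, hi) yields hi == 0x3ff00000 and lo == 0x0, and
// ints2Double(lo.u32(), hi.u32()) round-trips back to 1.0.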
#endif // USE(JSVALUE32_64)

static void decodeResult(SlowPathReturnType result, CLoopRegister& t0, CLoopRegister& t1)
{
    const void* t0Result;
    const void* t1Result;
    JSC::decodeResult(result, t0Result, t1Result);
    t0 = t0Result;
    t1 = t1Result;
}
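
// For illustration, a slow-path call site looks roughly like this
// (slow_path_foo is a hypothetical helper):
//
//     decodeResult(slow_path_foo(...), t0, t1);
//
// after which t0 and t1 carry the helper's two returned machine words.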

} // namespace LLInt

//============================================================================
// The llint C++ interpreter loop:
//

JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame, bool isInitializationPass)
{
#define CAST bitwise_cast

    // One-time initialization of our address tables. We have to put this code
    // here because our labels are only in scope inside this function. The
    // caller (or one of its ancestors) is responsible for ensuring that this
    // is only called once during the initialization of the VM before threads
    // are at play.
    if (UNLIKELY(isInitializationPass)) {
        Opcode* opcodeMap = LLInt::opcodeMap();
        Opcode* opcodeMapWide16 = LLInt::opcodeMapWide16();
        Opcode* opcodeMapWide32 = LLInt::opcodeMapWide32();

#if ENABLE(COMPUTED_GOTO_OPCODES)
        #define OPCODE_ENTRY(__opcode, length) \
            opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode); \
            opcodeMapWide16[__opcode] = bitwise_cast<void*>(&&__opcode##_wide16); \
            opcodeMapWide32[__opcode] = bitwise_cast<void*>(&&__opcode##_wide32);

        #define LLINT_OPCODE_ENTRY(__opcode, length) \
            opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
#else
        // FIXME: this mapping is unnecessarily expensive in the absence of COMPUTED_GOTO:
        // narrow opcodes don't need any mapping, and wide opcodes just need to add numOpcodeIDs.
        #define OPCODE_ENTRY(__opcode, length) \
            opcodeMap[__opcode] = __opcode; \
            opcodeMapWide16[__opcode] = static_cast<OpcodeID>(__opcode##_wide16); \
            opcodeMapWide32[__opcode] = static_cast<OpcodeID>(__opcode##_wide32);

        #define LLINT_OPCODE_ENTRY(__opcode, length) \
            opcodeMap[__opcode] = __opcode;
#endif
        FOR_EACH_BYTECODE_ID(OPCODE_ENTRY)
        FOR_EACH_CLOOP_BYTECODE_HELPER_ID(LLINT_OPCODE_ENTRY)
        FOR_EACH_LLINT_NATIVE_HELPER(LLINT_OPCODE_ENTRY)
        #undef OPCODE_ENTRY
        #undef LLINT_OPCODE_ENTRY

        // Note: we can only set the exceptionInstructions after we have
        // initialized the opcodeMap above. This is because getCodePtr()
        // can depend on the opcodeMap.
        uint8_t* exceptionInstructions = reinterpret_cast<uint8_t*>(LLInt::exceptionInstructions());
        for (int i = 0; i < maxOpcodeLength + 1; ++i)
            exceptionInstructions[i] = llint_throw_from_slow_path_trampoline;

        return JSValue();
    }

    // Define the pseudo registers used by the LLINT C Loop backend:
    static_assert(sizeof(CLoopRegister) == sizeof(intptr_t));

    // The CLoop llint backend is initially based on the ARMv7 backend, and
    // then further enhanced with a few instructions from the x86 backend to
    // support building for X64 targets. Hence, the shape of the generated
    // code and the usage convention of registers will look a lot like the
    // ARMv7 backend's.
    //
    // For example, on a 32-bit build:
    // 1. Outgoing args will be set up as follows:
    //    arg1 in t0 (r0 on ARM)
    //    arg2 in t1 (r1 on ARM)
    // 2. 32-bit return values will be in t0 (r0 on ARM).
    // 3. 64-bit return values (e.g. doubles) will be in t0,t1 (r0,r1 on ARM).
    //
    // But instead of naming these simulator registers based on their ARM
    // counterparts, we'll name them based on their original llint asm names.
    // This will make it easier to correlate the generated code with the
    // original llint asm code.
    //
    // On a 64-bit build, it's more like x64 in that the registers are 64-bit.
    // Hence:
    // 1. Outgoing args are still the same: arg1 in t0, arg2 in t1, etc.
    // 2. 32-bit result values will be in the low 32 bits of t0.
    // 3. 64-bit result values will be in t0.

    CLoopRegister t0, t1, t2, t3, t5, sp, cfr, lr, pc;
#if USE(JSVALUE64)
    CLoopRegister pcBase, numberTag, notCellMask;
#endif
    CLoopRegister metadataTable;
    CLoopDoubleRegister d0, d1;

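    // RAII helper: remembers the CLoop stack's current stack pointer and
    // restores it when the scope exits, so the interpreter leaves the
    // emulated stack exactly as it found it.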
    struct StackPointerScope {
        StackPointerScope(CLoopStack& stack)
            : m_stack(stack)
            , m_originalStackPointer(stack.currentStackPointer())
        { }

        ~StackPointerScope()
        {
            m_stack.setCurrentStackPointer(m_originalStackPointer);
        }

    private:
        CLoopStack& m_stack;
        void* m_originalStackPointer;
    };

    CLoopStack& cloopStack = vm->interpreter->cloopStack();
    StackPointerScope stackPointerScope(cloopStack);

    lr = getOpcode(llint_return_to_host);
    sp = cloopStack.currentStackPointer();
    cfr = vm->topCallFrame;
#ifndef NDEBUG
    void* startSP = sp.vp();
    CallFrame* startCFR = cfr.callFrame();
#endif

    // Initialize the incoming args for doVMEntryToJavaScript:
    t0 = executableAddress;
    t1 = vm;
    t2 = protoCallFrame;

#if USE(JSVALUE64)
    // For the ASM llint, JITStubs takes care of this initialization. We do
    // it explicitly here for the C loop:
    numberTag = JSValue::NumberTag;
    notCellMask = JSValue::NotCellMask;
#endif // USE(JSVALUE64)

    // Interpreter variables for value passing between opcodes and/or helpers:
    NativeFunction nativeFunc = nullptr;
    JSValue functionReturnValue;
    Opcode opcode = getOpcode(entryOpcodeID);

#define PUSH(cloopReg) \
    do { \
        sp = sp.ip() - 1; \
        *sp.ip() = cloopReg.i(); \
    } while (false)

#define POP(cloopReg) \
    do { \
        cloopReg = *sp.ip(); \
        sp = sp.ip() + 1; \
    } while (false)
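
// For illustration: PUSH(lr) decrements sp by one word and stores lr's bits
// there; POP(lr) loads the word back and increments sp. Together they emulate
// a CPU's link-register save/restore around calls.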

#if ENABLE(OPCODE_STATS)
#define RECORD_OPCODE_STATS(__opcode) OpcodeStats::recordInstruction(__opcode)
#else
#define RECORD_OPCODE_STATS(__opcode)
#endif

#if USE(JSVALUE32_64)
#define FETCH_OPCODE() *pc.i8p()
#else // USE(JSVALUE64)
#define FETCH_OPCODE() *bitwise_cast<OpcodeID*>(pcBase.i8p() + pc.i())
#endif // USE(JSVALUE64)

#define NEXT_INSTRUCTION() \
    do { \
        opcode = FETCH_OPCODE(); \
        DISPATCH_OPCODE(); \
    } while (false)
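
// For illustration, a generated bytecode handler ends roughly like this
// (a hypothetical sketch of offlineasm output, not actual generated code;
// size_of_op_enter stands in for the instruction's encoded length):
//
//     OFFLINE_ASM_OPCODE_LABEL(op_enter)
//         // ... the handler's body ...
//         pc = pc.i8p() + size_of_op_enter; // advance past this instruction
//         NEXT_INSTRUCTION();               // fetch and dispatch the next one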

#if ENABLE(COMPUTED_GOTO_OPCODES)

    //========================================================================
    // Loop dispatch mechanism using computed goto statements:

    #define DISPATCH_OPCODE() goto *opcode

    #define DEFINE_OPCODE(__opcode) \
        __opcode: \
            RECORD_OPCODE_STATS(__opcode);

    // Dispatch to the current PC's bytecode:
    DISPATCH_OPCODE();

#else // !ENABLE(COMPUTED_GOTO_OPCODES)
    //========================================================================
    // Loop dispatch mechanism using a C switch statement:

    #define DISPATCH_OPCODE() goto dispatchOpcode

    #define DEFINE_OPCODE(__opcode) \
        case __opcode: \
        __opcode: \
            RECORD_OPCODE_STATS(__opcode);

    // Dispatch to the current PC's bytecode:
    dispatchOpcode:
    switch (static_cast<unsigned>(opcode))

#endif // !ENABLE(COMPUTED_GOTO_OPCODES)

    //========================================================================
    // Bytecode handlers:
    {
        // This is the file generated by offlineasm, which contains all of the
        // bytecode handlers for the interpreter, as compiled from
        // LowLevelInterpreter.asm and its peers.

        IGNORE_CLANG_WARNINGS_BEGIN("unreachable-code")
        #include "LLIntAssembly.h"
        IGNORE_CLANG_WARNINGS_END

        OFFLINE_ASM_GLUE_LABEL(llint_return_to_host)
        {
            ASSERT(startSP == sp.vp());
            ASSERT(startCFR == cfr.callFrame());
#if USE(JSVALUE32_64)
            return JSValue(t1.i(), t0.i()); // returning JSValue(tag, payload);
#else
            return JSValue::decode(t0.encodedJSValue());
#endif
        }

        // In the ASM llint, getHostCallReturnValue() is a glue function
        // provided by the JIT (see jit/JITOperations.cpp).
        // We simulate it here with a pseudo-opcode handler.
        OFFLINE_ASM_GLUE_LABEL(getHostCallReturnValue)
        {
            // The part in getHostCallReturnValueWithExecState():
            JSValue result = vm->hostCallReturnValue;
#if USE(JSVALUE32_64)
            t1 = result.tag();
            t0 = result.payload();
#else
            t0 = JSValue::encode(result);
#endif
            opcode = lr.opcode();
            DISPATCH_OPCODE();
        }

#if !ENABLE(COMPUTED_GOTO_OPCODES)
    default:
        ASSERT(false);
#endif

    } // END bytecode handler cases.

#if ENABLE(COMPUTED_GOTO_OPCODES)
    // Keep the compiler happy so that it doesn't complain about unused
    // labels for the LLInt trampoline glue. The labels are automatically
    // emitted by the label macros above, and some of them are referenced by
    // the llint generated code. Since we can't tell ahead of time which
    // will be referenced and which will not, we'll just pacify the
    // compiler on all such labels:
    #define LLINT_OPCODE_ENTRY(__opcode, length) \
        UNUSED_LABEL(__opcode);
    FOR_EACH_OPCODE_ID(LLINT_OPCODE_ENTRY);
    #undef LLINT_OPCODE_ENTRY
#endif

    #undef NEXT_INSTRUCTION
    #undef DEFINE_OPCODE
    #undef CHECK_FOR_TIMEOUT
    #undef CAST

    return JSValue(); // to suppress a compiler warning.
} // CLoop::execute()

} // namespace JSC

#elif !COMPILER(MSVC)

//============================================================================
// Define the opcode dispatch mechanism when using an ASM loop:
//

// These are for building an interpreter from generated assembly code:
#define OFFLINE_ASM_BEGIN   asm (
#define OFFLINE_ASM_END     );

#if USE(LLINT_EMBEDDED_OPCODE_ID)
#define EMBED_OPCODE_ID_IF_NEEDED(__opcode) ".int " __opcode##_value_string "\n"
#else
#define EMBED_OPCODE_ID_IF_NEEDED(__opcode)
#endif
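
// For illustration: with USE(LLINT_EMBEDDED_OPCODE_ID), each handler's label
// is preceded by a ".int" directive carrying the opcode's numeric ID (the
// __opcode##_value_string token pastes to a stringified constant defined
// elsewhere), so the ID can be read from memory just before the handler's
// entry point.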

#define OFFLINE_ASM_OPCODE_LABEL(__opcode) \
    EMBED_OPCODE_ID_IF_NEEDED(__opcode) \
    OFFLINE_ASM_OPCODE_DEBUG_LABEL(llint_##__opcode) \
    OFFLINE_ASM_LOCAL_LABEL(llint_##__opcode)

#define OFFLINE_ASM_GLUE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(__opcode)

#if CPU(ARM_THUMB2)
#define OFFLINE_ASM_GLOBAL_LABEL(label)          \
    ".text\n"                                    \
    ".align 4\n"                                 \
    ".globl " SYMBOL_STRING(label) "\n"          \
    HIDE_SYMBOL(label) "\n"                      \
    ".thumb\n"                                   \
    ".thumb_func " THUMB_FUNC_PARAM(label) "\n"  \
    SYMBOL_STRING(label) ":\n"
#elif CPU(ARM64)
#define OFFLINE_ASM_GLOBAL_LABEL(label)         \
    ".text\n"                                   \
    ".align 4\n"                                \
    ".globl " SYMBOL_STRING(label) "\n"         \
    HIDE_SYMBOL(label) "\n"                     \
    SYMBOL_STRING(label) ":\n"
#else
#define OFFLINE_ASM_GLOBAL_LABEL(label)         \
    ".text\n"                                   \
    ".globl " SYMBOL_STRING(label) "\n"         \
    HIDE_SYMBOL(label) "\n"                     \
    SYMBOL_STRING(label) ":\n"
#endif
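
// For illustration, on an ELF/Linux x86-64 target,
// OFFLINE_ASM_GLOBAL_LABEL(vmEntryToJavaScript) emits assembly along these
// lines (a sketch; the exact SYMBOL_STRING and HIDE_SYMBOL expansions are
// platform-dependent):
//
//     .text
//     .globl vmEntryToJavaScript
//     .hidden vmEntryToJavaScript
//     vmEntryToJavaScript: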

#define OFFLINE_ASM_LOCAL_LABEL(label) LOCAL_LABEL_STRING(label) ":\n"

#if OS(LINUX)
#define OFFLINE_ASM_OPCODE_DEBUG_LABEL(label) #label ":\n"
#else
#define OFFLINE_ASM_OPCODE_DEBUG_LABEL(label)
#endif

// This is a file generated by offlineasm, which contains all of the assembly code
// for the interpreter, as compiled from LowLevelInterpreter.asm.
#include "LLIntAssembly.h"

#endif // ENABLE(C_LOOP)
557