1 | /* |
2 | * Copyright (C) 2013-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #pragma once |
27 | |
28 | #include "DFGCommon.h" |
29 | |
30 | #if ENABLE(FTL_JIT) |
31 | |
32 | #include "B3BasicBlockInlines.h" |
33 | #include "B3CCallValue.h" |
34 | #include "B3Compilation.h" |
35 | #include "B3FrequentedBlock.h" |
36 | #include "B3Procedure.h" |
37 | #include "B3SwitchValue.h" |
38 | #include "B3Width.h" |
39 | #include "FTLAbbreviatedTypes.h" |
40 | #include "FTLAbstractHeapRepository.h" |
41 | #include "FTLCommonValues.h" |
42 | #include "FTLState.h" |
43 | #include "FTLSwitchCase.h" |
44 | #include "FTLTypedPointer.h" |
45 | #include "FTLValueFromBlock.h" |
46 | #include "FTLWeight.h" |
47 | #include "FTLWeightedTarget.h" |
48 | #include "HeapCell.h" |
49 | #include <wtf/OrderMaker.h> |
50 | #include <wtf/StringPrintStream.h> |
51 | |
52 | // FIXME: remove this once everything can be generated through B3. |
53 | IGNORE_WARNINGS_BEGIN("missing-noreturn" ) |
54 | ALLOW_UNUSED_PARAMETERS_BEGIN |
55 | |
56 | namespace JSC { |
57 | |
58 | namespace DFG { |
59 | struct Node; |
60 | } // namespace DFG |
61 | |
62 | namespace B3 { |
63 | class FenceValue; |
64 | class SlotBaseValue; |
65 | } // namespace B3 |
66 | |
67 | namespace FTL { |
68 | |
// Scale applied to the index when forming a base + (index * scale) address: one, two, four, or
// eight bytes, or the size of a pointer.
enum Scale { ScaleOne, ScaleTwo, ScaleFour, ScaleEight, ScalePtr };
70 | |
71 | class Output : public CommonValues { |
72 | public: |
73 | Output(State&); |
74 | ~Output(); |
75 | |
76 | void initialize(AbstractHeapRepository&); |
77 | |
78 | void setFrequency(double value) |
79 | { |
80 | m_frequency = value; |
81 | } |
82 | |
83 | LBasicBlock newBlock(); |
84 | |
85 | LBasicBlock insertNewBlocksBefore(LBasicBlock nextBlock) |
86 | { |
87 | LBasicBlock lastNextBlock = m_nextBlock; |
88 | m_nextBlock = nextBlock; |
89 | return lastNextBlock; |
90 | } |
91 | |
92 | void applyBlockOrder(); |
93 | |
94 | LBasicBlock appendTo(LBasicBlock, LBasicBlock nextBlock); |
95 | void appendTo(LBasicBlock); |
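    // A typical lowering pattern (an illustrative sketch; the block names are hypothetical):
    //
    //     LBasicBlock slowPath = newBlock();
    //     LBasicBlock continuation = newBlock();
    //     LBasicBlock lastNext = appendTo(slowPath, continuation);
    //     ... emit the slow path, ending with a jump(continuation) ...
    //     appendTo(continuation, lastNext);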
96 | |
97 | void setOrigin(DFG::Node* node) { m_origin = node; } |
98 | B3::Origin origin() { return B3::Origin(m_origin); } |
99 | |
100 | LValue framePointer(); |
101 | |
102 | B3::SlotBaseValue* lockedStackSlot(size_t bytes); |
103 | |
104 | LValue constBool(bool value); |
105 | LValue constInt32(int32_t value); |
106 | |
107 | LValue alreadyRegisteredWeakPointer(DFG::Graph& graph, JSCell* cell) |
108 | { |
109 | ASSERT(graph.m_plan.weakReferences().contains(cell)); |
110 | |
111 | return constIntPtr(bitwise_cast<intptr_t>(cell)); |
112 | } |
113 | |
114 | LValue alreadyRegisteredFrozenPointer(DFG::FrozenValue* value) |
115 | { |
116 | RELEASE_ASSERT(value->value().isCell()); |
117 | |
118 | return constIntPtr(bitwise_cast<intptr_t>(value->cell())); |
119 | } |
120 | |
121 | template<typename T> |
122 | LValue constIntPtr(T* value) |
123 | { |
        static_assert(!std::is_base_of<HeapCell, T>::value, "To use a GC pointer, the graph must be aware of it. Use gcPointer instead and make sure the graph is aware of this reference.");
125 | if (sizeof(void*) == 8) |
126 | return constInt64(bitwise_cast<intptr_t>(value)); |
127 | return constInt32(bitwise_cast<intptr_t>(value)); |
128 | } |
129 | template<typename T> |
130 | LValue constIntPtr(T value) |
131 | { |
132 | if (sizeof(void*) == 8) |
133 | return constInt64(static_cast<intptr_t>(value)); |
134 | return constInt32(static_cast<intptr_t>(value)); |
135 | } |
136 | LValue constInt64(int64_t value); |
137 | LValue constDouble(double value); |
138 | |
139 | LValue phi(LType); |
140 | template<typename... Params> |
141 | LValue phi(LType, ValueFromBlock, Params... theRest); |
142 | template<typename VectorType> |
143 | LValue phi(LType, const VectorType&); |
144 | void addIncomingToPhi(LValue phi, ValueFromBlock); |
145 | template<typename... Params> |
146 | void addIncomingToPhi(LValue phi, ValueFromBlock, Params... theRest); |
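    // Phis are typically fed from anchors created in the predecessor blocks, for example
    // (an illustrative sketch; the value names are hypothetical and anchor() is declared below):
    //
    //     ValueFromBlock fastResult = anchor(fastValue); // in the fast block
    //     ValueFromBlock slowResult = anchor(slowValue); // in the slow block
    //     LValue result = phi(B3::Int64, fastResult, slowResult); // in the continuation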
147 | |
148 | LValue opaque(LValue); |
149 | |
150 | LValue add(LValue, LValue); |
151 | LValue sub(LValue, LValue); |
152 | LValue mul(LValue, LValue); |
153 | LValue div(LValue, LValue); |
154 | LValue chillDiv(LValue, LValue); |
155 | LValue mod(LValue, LValue); |
156 | LValue chillMod(LValue, LValue); |
157 | LValue neg(LValue); |
158 | |
159 | LValue doubleAdd(LValue, LValue); |
160 | LValue doubleSub(LValue, LValue); |
161 | LValue doubleMul(LValue, LValue); |
162 | LValue doubleDiv(LValue, LValue); |
163 | LValue doubleMod(LValue, LValue); |
164 | LValue doubleNeg(LValue value) { return neg(value); } |
165 | |
166 | LValue bitAnd(LValue, LValue); |
167 | LValue bitOr(LValue, LValue); |
168 | LValue bitXor(LValue, LValue); |
169 | LValue shl(LValue, LValue shiftAmount); |
170 | LValue aShr(LValue, LValue shiftAmount); |
171 | LValue lShr(LValue, LValue shiftAmount); |
172 | LValue bitNot(LValue); |
173 | LValue logicalNot(LValue); |
174 | |
175 | LValue ctlz32(LValue); |
176 | LValue doubleAbs(LValue); |
177 | LValue doubleCeil(LValue); |
178 | LValue doubleFloor(LValue); |
179 | LValue doubleTrunc(LValue); |
180 | |
181 | LValue doubleUnary(DFG::Arith::UnaryType, LValue); |
182 | |
183 | LValue doublePow(LValue base, LValue exponent); |
184 | LValue doublePowi(LValue base, LValue exponent); |
185 | |
186 | LValue doubleSqrt(LValue); |
187 | |
188 | LValue doubleLog(LValue); |
189 | |
190 | LValue doubleToInt(LValue); |
191 | LValue doubleToInt64(LValue); |
192 | LValue doubleToUInt(LValue); |
193 | |
194 | LValue signExt32To64(LValue); |
195 | LValue signExt32ToPtr(LValue); |
196 | LValue zeroExt(LValue, LType); |
197 | LValue zeroExtPtr(LValue value) { return zeroExt(value, B3::Int64); } |
198 | LValue intToDouble(LValue); |
199 | LValue unsignedToDouble(LValue); |
200 | LValue castToInt32(LValue); |
201 | LValue doubleToFloat(LValue); |
202 | LValue floatToDouble(LValue); |
203 | LValue bitCast(LValue, LType); |
204 | LValue fround(LValue); |
205 | |
206 | LValue load(TypedPointer, LType); |
207 | LValue store(LValue, TypedPointer); |
208 | B3::FenceValue* fence(const AbstractHeap* read, const AbstractHeap* write); |
209 | |
210 | LValue load8SignExt32(TypedPointer); |
211 | LValue load8ZeroExt32(TypedPointer); |
212 | LValue load16SignExt32(TypedPointer); |
213 | LValue load16ZeroExt32(TypedPointer); |
214 | LValue load32(TypedPointer pointer) { return load(pointer, B3::Int32); } |
215 | LValue load64(TypedPointer pointer) { return load(pointer, B3::Int64); } |
216 | LValue loadPtr(TypedPointer pointer) { return load(pointer, B3::pointerType()); } |
217 | LValue loadFloat(TypedPointer pointer) { return load(pointer, B3::Float); } |
218 | LValue loadDouble(TypedPointer pointer) { return load(pointer, B3::Double); } |
219 | LValue store32As8(LValue, TypedPointer); |
220 | LValue store32As16(LValue, TypedPointer); |
221 | LValue store32(LValue value, TypedPointer pointer) |
222 | { |
223 | ASSERT(value->type() == B3::Int32); |
224 | return store(value, pointer); |
225 | } |
226 | LValue store64(LValue value, TypedPointer pointer) |
227 | { |
228 | ASSERT(value->type() == B3::Int64); |
229 | return store(value, pointer); |
230 | } |
231 | LValue storePtr(LValue value, TypedPointer pointer) |
232 | { |
233 | ASSERT(value->type() == B3::pointerType()); |
234 | return store(value, pointer); |
235 | } |
236 | LValue storeFloat(LValue value, TypedPointer pointer) |
237 | { |
238 | ASSERT(value->type() == B3::Float); |
239 | return store(value, pointer); |
240 | } |
241 | LValue storeDouble(LValue value, TypedPointer pointer) |
242 | { |
243 | ASSERT(value->type() == B3::Double); |
244 | return store(value, pointer); |
245 | } |
246 | |
247 | enum LoadType { |
248 | Load8SignExt32, |
249 | Load8ZeroExt32, |
250 | Load16SignExt32, |
251 | Load16ZeroExt32, |
252 | Load32, |
253 | Load64, |
254 | LoadPtr, |
255 | LoadFloat, |
256 | LoadDouble |
257 | }; |
258 | |
259 | LValue load(TypedPointer, LoadType); |
260 | |
261 | enum StoreType { |
262 | Store32As8, |
263 | Store32As16, |
264 | Store32, |
265 | Store64, |
266 | StorePtr, |
267 | StoreFloat, |
268 | StoreDouble |
269 | }; |
270 | |
271 | LValue store(LValue, TypedPointer, StoreType); |
272 | |
273 | LValue addPtr(LValue value, ptrdiff_t immediate = 0) |
274 | { |
275 | if (!immediate) |
276 | return value; |
277 | return add(value, constIntPtr(immediate)); |
278 | } |
279 | |
280 | // Construct an address by offsetting base by the requested amount and ascribing |
281 | // the requested abstract heap to it. |
282 | TypedPointer address(const AbstractHeap& heap, LValue base, ptrdiff_t offset = 0) |
283 | { |
284 | return TypedPointer(heap, addPtr(base, offset)); |
285 | } |
    // Construct an address by offsetting base by the amount specified by the field, plus an
    // optional additional offset (use this with care), and create a TypedPointer that ascribes
    // the given field as the abstract heap.
289 | TypedPointer address(LValue base, const AbstractHeap& field, ptrdiff_t offset = 0) |
290 | { |
291 | return address(field, base, offset + field.offset()); |
292 | } |
293 | |
294 | LValue baseIndex(LValue base, LValue index, Scale, ptrdiff_t offset = 0); |
295 | |
296 | TypedPointer baseIndex(const AbstractHeap& heap, LValue base, LValue index, Scale scale, ptrdiff_t offset = 0) |
297 | { |
298 | return TypedPointer(heap, baseIndex(base, index, scale, offset)); |
299 | } |
300 | TypedPointer baseIndex(IndexedAbstractHeap& heap, LValue base, LValue index, JSValue indexAsConstant = JSValue(), ptrdiff_t offset = 0, LValue mask = nullptr) |
301 | { |
302 | return heap.baseIndex(*this, base, index, indexAsConstant, offset, mask); |
303 | } |
304 | |
305 | TypedPointer absolute(const void* address); |
306 | |
307 | LValue load8SignExt32(LValue base, const AbstractHeap& field) { return load8SignExt32(address(base, field)); } |
308 | LValue load8ZeroExt32(LValue base, const AbstractHeap& field) { return load8ZeroExt32(address(base, field)); } |
309 | LValue load16SignExt32(LValue base, const AbstractHeap& field) { return load16SignExt32(address(base, field)); } |
310 | LValue load16ZeroExt32(LValue base, const AbstractHeap& field) { return load16ZeroExt32(address(base, field)); } |
311 | LValue load32(LValue base, const AbstractHeap& field) { return load32(address(base, field)); } |
312 | LValue load64(LValue base, const AbstractHeap& field) { return load64(address(base, field)); } |
313 | LValue loadPtr(LValue base, const AbstractHeap& field) { return loadPtr(address(base, field)); } |
314 | LValue loadDouble(LValue base, const AbstractHeap& field) { return loadDouble(address(base, field)); } |
315 | void store32As8(LValue value, LValue base, const AbstractHeap& field) { store32As8(value, address(base, field)); } |
316 | void store32As16(LValue value, LValue base, const AbstractHeap& field) { store32As16(value, address(base, field)); } |
317 | void store32(LValue value, LValue base, const AbstractHeap& field) { store32(value, address(base, field)); } |
318 | void store64(LValue value, LValue base, const AbstractHeap& field) { store64(value, address(base, field)); } |
319 | void storePtr(LValue value, LValue base, const AbstractHeap& field) { storePtr(value, address(base, field)); } |
320 | void storeDouble(LValue value, LValue base, const AbstractHeap& field) { storeDouble(value, address(base, field)); } |
321 | |
322 | // FIXME: Explore adding support for value range constraints to B3. Maybe it could be as simple as having |
323 | // a load instruction that guarantees that its result is non-negative. |
324 | // https://bugs.webkit.org/show_bug.cgi?id=151458 |
325 | void ascribeRange(LValue, const ValueRange&) { } |
326 | LValue nonNegative32(LValue loadInstruction) { return loadInstruction; } |
327 | LValue load32NonNegative(TypedPointer pointer) { return load32(pointer); } |
328 | LValue load32NonNegative(LValue base, const AbstractHeap& field) { return load32(base, field); } |
329 | |
330 | LValue equal(LValue, LValue); |
331 | LValue notEqual(LValue, LValue); |
332 | LValue above(LValue, LValue); |
333 | LValue aboveOrEqual(LValue, LValue); |
334 | LValue below(LValue, LValue); |
335 | LValue belowOrEqual(LValue, LValue); |
336 | LValue greaterThan(LValue, LValue); |
337 | LValue greaterThanOrEqual(LValue, LValue); |
338 | LValue lessThan(LValue, LValue); |
339 | LValue lessThanOrEqual(LValue, LValue); |
340 | |
341 | LValue doubleEqual(LValue, LValue); |
342 | LValue doubleEqualOrUnordered(LValue, LValue); |
343 | LValue doubleNotEqualOrUnordered(LValue, LValue); |
344 | LValue doubleLessThan(LValue, LValue); |
345 | LValue doubleLessThanOrEqual(LValue, LValue); |
346 | LValue doubleGreaterThan(LValue, LValue); |
347 | LValue doubleGreaterThanOrEqual(LValue, LValue); |
348 | LValue doubleNotEqualAndOrdered(LValue, LValue); |
349 | LValue doubleLessThanOrUnordered(LValue, LValue); |
350 | LValue doubleLessThanOrEqualOrUnordered(LValue, LValue); |
351 | LValue doubleGreaterThanOrUnordered(LValue, LValue); |
352 | LValue doubleGreaterThanOrEqualOrUnordered(LValue, LValue); |
353 | |
354 | LValue isZero32(LValue); |
355 | LValue notZero32(LValue); |
356 | LValue isZero64(LValue); |
357 | LValue notZero64(LValue); |
358 | LValue isNull(LValue value) { return isZero64(value); } |
359 | LValue notNull(LValue value) { return notZero64(value); } |
360 | |
361 | LValue testIsZero32(LValue value, LValue mask) { return isZero32(bitAnd(value, mask)); } |
362 | LValue testNonZero32(LValue value, LValue mask) { return notZero32(bitAnd(value, mask)); } |
363 | LValue testIsZero64(LValue value, LValue mask) { return isZero64(bitAnd(value, mask)); } |
364 | LValue testNonZero64(LValue value, LValue mask) { return notZero64(bitAnd(value, mask)); } |
365 | LValue testIsZeroPtr(LValue value, LValue mask) { return isNull(bitAnd(value, mask)); } |
366 | LValue testNonZeroPtr(LValue value, LValue mask) { return notNull(bitAnd(value, mask)); } |
367 | |
368 | LValue select(LValue value, LValue taken, LValue notTaken); |
369 | |
370 | // These are relaxed atomics by default. Use AbstractHeapRepository::decorateFencedAccess() with a |
371 | // non-null heap to make them seq_cst fenced. |
372 | LValue atomicXchgAdd(LValue operand, TypedPointer pointer, B3::Width); |
373 | LValue atomicXchgAnd(LValue operand, TypedPointer pointer, B3::Width); |
374 | LValue atomicXchgOr(LValue operand, TypedPointer pointer, B3::Width); |
375 | LValue atomicXchgSub(LValue operand, TypedPointer pointer, B3::Width); |
376 | LValue atomicXchgXor(LValue operand, TypedPointer pointer, B3::Width); |
377 | LValue atomicXchg(LValue operand, TypedPointer pointer, B3::Width); |
378 | LValue atomicStrongCAS(LValue expected, LValue newValue, TypedPointer pointer, B3::Width); |
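    // For example, a relaxed 32-bit fetch-and-add (an illustrative sketch; "pointer" is a
    // hypothetical TypedPointer):
    //
    //     LValue oldValue = atomicXchgAdd(constInt32(1), pointer, B3::Width32);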
379 | |
380 | template<typename VectorType> |
381 | LValue call(LType type, LValue function, const VectorType& vector) |
382 | { |
383 | B3::CCallValue* result = m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), function); |
384 | result->appendArgs(vector); |
385 | return result; |
386 | } |
387 | LValue call(LType type, LValue function) { return m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), function); } |
388 | LValue call(LType type, LValue function, LValue arg1) { return m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), function, arg1); } |
389 | template<typename... Args> |
390 | LValue call(LType type, LValue function, LValue arg1, Args... args) { return m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), function, arg1, args...); } |
391 | |
    // Emits a C call whose effects are declared as B3::Effects::none(), so B3 may hoist,
    // deduplicate, or eliminate it. Use this only for functions that are genuinely pure.
    template<typename Function, typename... Args>
    LValue callWithoutSideEffects(B3::Type type, Function function, LValue arg1, Args... args)
    {
        static_assert(!std::is_same<Function, LValue>::value);
        return m_block->appendNew<B3::CCallValue>(m_proc, type, origin(), B3::Effects::none(),
            constIntPtr(tagCFunctionPtr<void*>(function, B3CCallPtrTag)), arg1, args...);
    }
399 | |
400 | // FIXME: Consider enhancing this to allow the client to choose the target PtrTag to use. |
401 | // https://bugs.webkit.org/show_bug.cgi?id=184324 |
402 | template<typename FunctionType> |
403 | LValue operation(FunctionType function) { return constIntPtr(tagCFunctionPtr<void*>(function, B3CCallPtrTag)); } |
404 | |
405 | void jump(LBasicBlock); |
406 | void branch(LValue condition, LBasicBlock taken, Weight takenWeight, LBasicBlock notTaken, Weight notTakenWeight); |
407 | void branch(LValue condition, WeightedTarget taken, WeightedTarget notTaken) |
408 | { |
409 | branch(condition, taken.target(), taken.weight(), notTaken.target(), notTaken.weight()); |
410 | } |
411 | |
    // Branches to an already-created handler if the condition is true and "falls through" if it
    // is false. The fall-through is simulated by creating a continuation block for you.
414 | void check(LValue condition, WeightedTarget taken, Weight notTakenWeight); |
415 | |
416 | // Same as check(), but uses Weight::inverse() to compute the notTakenWeight. |
417 | void check(LValue condition, WeightedTarget taken); |
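    // For example (an illustrative sketch; rarely() is a WeightedTarget helper from
    // FTLWeightedTarget.h and the names are hypothetical):
    //
    //     check(isZero32(length), rarely(slowPath));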
418 | |
419 | template<typename VectorType> |
420 | void switchInstruction(LValue value, const VectorType& cases, LBasicBlock fallThrough, Weight fallThroughWeight) |
421 | { |
422 | B3::SwitchValue* switchValue = m_block->appendNew<B3::SwitchValue>(m_proc, origin(), value); |
423 | switchValue->setFallThrough(B3::FrequentedBlock(fallThrough)); |
424 | for (const SwitchCase& switchCase : cases) { |
            int64_t caseValue = switchCase.value()->asInt();
            B3::FrequentedBlock target(switchCase.target(), switchCase.weight().frequencyClass());
            switchValue->appendCase(B3::SwitchCase(caseValue, target));
428 | } |
429 | } |
430 | |
431 | void entrySwitch(const Vector<LBasicBlock>&); |
432 | |
433 | void ret(LValue); |
434 | |
435 | void unreachable(); |
436 | |
437 | void appendSuccessor(WeightedTarget); |
438 | |
439 | B3::CheckValue* speculate(LValue); |
440 | B3::CheckValue* speculateAdd(LValue, LValue); |
441 | B3::CheckValue* speculateSub(LValue, LValue); |
442 | B3::CheckValue* speculateMul(LValue, LValue); |
443 | |
444 | B3::PatchpointValue* patchpoint(LType); |
445 | |
446 | void trap(); |
447 | |
448 | ValueFromBlock anchor(LValue); |
449 | |
450 | void incrementSuperSamplerCount(); |
451 | void decrementSuperSamplerCount(); |
452 | |
453 | #if PLATFORM(COCOA) |
454 | #pragma mark - States |
455 | #endif |
456 | B3::Procedure& m_proc; |
457 | |
458 | DFG::Node* m_origin { nullptr }; |
459 | LBasicBlock m_block { nullptr }; |
460 | LBasicBlock m_nextBlock { nullptr }; |
461 | |
462 | AbstractHeapRepository* m_heaps; |
463 | |
464 | double m_frequency { 1 }; |
465 | |
466 | private: |
467 | OrderMaker<LBasicBlock> m_blockOrder; |
468 | }; |
469 | |
470 | template<typename... Params> |
471 | inline LValue Output::phi(LType type, ValueFromBlock value, Params... theRest) |
472 | { |
473 | LValue phiNode = phi(type); |
474 | addIncomingToPhi(phiNode, value, theRest...); |
475 | return phiNode; |
476 | } |
477 | |
478 | template<typename VectorType> |
479 | inline LValue Output::phi(LType type, const VectorType& vector) |
480 | { |
481 | LValue phiNode = phi(type); |
482 | for (const ValueFromBlock& valueFromBlock : vector) |
483 | addIncomingToPhi(phiNode, valueFromBlock); |
484 | return phiNode; |
485 | } |
486 | |
487 | template<typename... Params> |
488 | inline void Output::addIncomingToPhi(LValue phi, ValueFromBlock value, Params... theRest) |
489 | { |
490 | addIncomingToPhi(phi, value); |
491 | addIncomingToPhi(phi, theRest...); |
492 | } |
493 | |
494 | ALLOW_UNUSED_PARAMETERS_END |
495 | IGNORE_WARNINGS_END |
496 | |
497 | } } // namespace JSC::FTL |
498 | |
499 | #endif // ENABLE(FTL_JIT) |
500 | |