/*
 * Copyright (C) 2008-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)

#include "JIT.h"

#include "BytecodeGraph.h"
#include "BytecodeLivenessAnalysis.h"
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "DFGCapabilities.h"
#include "InterpreterInlines.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSCInlines.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "ModuleProgramCodeBlock.h"
#include "PCToCodeOriginMap.h"
#include "ProbeContext.h"
#include "ProfilerDatabase.h"
#include "ProgramCodeBlock.h"
#include "ResultType.h"
#include "SlowPathCall.h"
#include "StackAlignment.h"
#include "ThunkGenerators.h"
#include "TypeProfilerLog.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/GraphNodeWorklist.h>
#include <wtf/SimpleStats.h>

namespace JSC {
namespace JITInternal {
static constexpr const bool verbose = false;
}

Seconds totalBaselineCompileTime;
Seconds totalDFGCompileTime;
Seconds totalFTLCompileTime;
Seconds totalFTLDFGCompileTime;
Seconds totalFTLB3CompileTime;

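// Repatches the call instruction whose return address is |returnAddress| so that it targets
// newCalleeFunction (retagged as an operation pointer) instead of its current callee.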
void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr<CFunctionPtrTag> newCalleeFunction)
{
    MacroAssembler::repatchCall(
        CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)),
        newCalleeFunction.retagged<OperationPtrTag>());
}

JIT::JIT(VM* vm, CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset)
    : JSInterfaceJIT(vm, codeBlock)
    , m_interpreter(vm->interpreter)
    , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
    , m_bytecodeOffset(std::numeric_limits<unsigned>::max())
    , m_pcToCodeOriginMapBuilder(*vm)
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
    , m_shouldUseIndexMasking(Options::enableSpectreMitigations())
    , m_loopOSREntryBytecodeOffset(loopOSREntryBytecodeOffset)
{
}

JIT::~JIT()
{
}

#if ENABLE(DFG_JIT)
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    JumpList skipOptimize;

    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
    ASSERT(!m_bytecodeOffset);

    copyCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);

    callOperation(operationOptimize, m_bytecodeOffset);
    skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
    jump(returnValueGPR, GPRInfo::callFrameRegister);
    skipOptimize.link(this);
}
#endif

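// Watchpoint write notification: unless the set is already invalidated, the write must take the
// slow path so the watchpoint can fire. A null or already-invalidated set still records an
// (unset) slow-case entry to keep slow-case bookkeeping aligned with the fast path.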
void JIT::emitNotifyWrite(WatchpointSet* set)
{
    if (!set || set->state() == IsInvalidated) {
        addSlowCase(Jump());
        return;
    }

    addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated)));
}

void JIT::emitNotifyWrite(GPRReg pointerToSet)
{
    addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
}

void JIT::assertStackPointerOffset()
{
    if (ASSERT_DISABLED)
        return;

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
    Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
    breakpoint();
    ok.link(this);
}

#define NEXT_OPCODE(name) \
    m_bytecodeOffset += currentInstruction->size(); \
    break;

#define DEFINE_SLOW_OP(name) \
    case op_##name: { \
        if (m_bytecodeOffset >= startBytecodeOffset) { \
            JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
            slowPathCall.call(); \
        } \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_OP(name) \
    case name: { \
        if (m_bytecodeOffset >= startBytecodeOffset) { \
            emit_##name(currentInstruction); \
        } \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_SLOW_OP(name) \
    case op_##name: { \
        emitSlowCaseCall(currentInstruction, iter, slow_path_##name); \
        NEXT_OPCODE(op_##name); \
    }

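// For illustration, DEFINE_OP(op_add) expands roughly to:
//
//     case op_add: {
//         if (m_bytecodeOffset >= startBytecodeOffset) {
//             emit_op_add(currentInstruction);
//         }
//         m_bytecodeOffset += currentInstruction->size();
//         break;
//     }
//
// i.e. each case dispatches to the per-opcode emitter (skipping instructions proven unreachable
// for loop OSR entry) and then advances m_bytecodeOffset to the next instruction.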
void JIT::emitSlowCaseCall(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, SlowPathFunction stub)
{
    linkAllSlowCases(iter);

    JITSlowPathCall slowPathCall(this, currentInstruction, stub);
    slowPathCall.call();
}

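// Main pass: walks the bytecode stream in order and emits the baseline fast-path code for each
// instruction, recording per-bytecode labels, slow-case jumps, and profiling hooks as it goes.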
void JIT::privateCompileMainPass()
{
    if (JITInternal::verbose)
        dataLog("Compiling ", *m_codeBlock, "\n");

    jitAssertTagsInPlace();
    jitAssertArgumentCountSane();

    auto& instructions = m_codeBlock->instructions();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_callLinkInfoIndex = 0;

    VM& vm = *m_codeBlock->vm();
    unsigned startBytecodeOffset = 0;
    if (m_loopOSREntryBytecodeOffset && (m_codeBlock->inherits<ProgramCodeBlock>(vm) || m_codeBlock->inherits<ModuleProgramCodeBlock>(vm))) {
        // We can only do this optimization because we execute ProgramCodeBlocks exactly once.
        // This optimization would be invalid otherwise. When the LLInt determines it wants to
        // do OSR entry into the baseline JIT in a loop, it will pass in the bytecode offset it
        // was executing at when it kicked off our compilation. We only need to compile code for
        // anything reachable from that bytecode offset.

        // We only bother building the bytecode graph if it could save time and executable
        // memory. We pick an arbitrary offset where we deem this is profitable.
        if (m_loopOSREntryBytecodeOffset >= 200) {
            // As a simplification, we don't find all bytecode ranges that are unreachable.
            // Instead, we just find the minimum bytecode offset that is reachable, and
            // compile code from that bytecode offset onwards.

            BytecodeGraph graph(m_codeBlock, m_codeBlock->instructions());
            BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(m_loopOSREntryBytecodeOffset);
            RELEASE_ASSERT(block);

            GraphNodeWorklist<BytecodeBasicBlock*> worklist;
            startBytecodeOffset = UINT_MAX;
            worklist.push(block);

            while (BytecodeBasicBlock* block = worklist.pop()) {
                startBytecodeOffset = std::min(startBytecodeOffset, block->leaderOffset());
                worklist.pushAll(block->successors());

                // Also add catch blocks for bytecodes that throw.
                if (m_codeBlock->numberOfExceptionHandlers()) {
                    for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) {
                        auto instruction = instructions.at(bytecodeOffset);
                        if (auto* handler = m_codeBlock->handlerForBytecodeOffset(bytecodeOffset))
                            worklist.push(graph.findBasicBlockWithLeaderOffset(handler->target));

                        bytecodeOffset += instruction->size();
                    }
                }
            }
        }
    }

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        if (m_bytecodeOffset == startBytecodeOffset && startBytecodeOffset > 0) {
            // We've proven all bytecode instructions up until here are unreachable.
            // Let's ensure that by crashing if it's ever hit.
            breakpoint();
        }

        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
        const Instruction* currentInstruction = instructions.at(m_bytecodeOffset).ptr();
        ASSERT_WITH_MESSAGE(currentInstruction->size(), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

        m_labels[m_bytecodeOffset] = label();

        if (JITInternal::verbose)
            dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());

        OpcodeID opcodeID = currentInstruction->opcodeID();

        if (UNLIKELY(m_compilation)) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
        }

        if (Options::eagerlyUpdateTopCallFrame())
            updateTopCallFrame();

        unsigned bytecodeOffset = m_bytecodeOffset;
#if ENABLE(MASM_PROBE)
        if (UNLIKELY(Options::traceBaselineJITExecution())) {
            CodeBlock* codeBlock = m_codeBlock;
            probe([=] (Probe::Context& ctx) {
                dataLogLn("JIT [", bytecodeOffset, "] ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock);
            });
        }
#endif

        switch (opcodeID) {
        DEFINE_SLOW_OP(in_by_val)
        DEFINE_SLOW_OP(less)
        DEFINE_SLOW_OP(lesseq)
        DEFINE_SLOW_OP(greater)
        DEFINE_SLOW_OP(greatereq)
        DEFINE_SLOW_OP(is_function)
        DEFINE_SLOW_OP(is_object_or_null)
        DEFINE_SLOW_OP(typeof)
        DEFINE_SLOW_OP(strcat)
        DEFINE_SLOW_OP(push_with_scope)
        DEFINE_SLOW_OP(create_lexical_environment)
        DEFINE_SLOW_OP(get_by_val_with_this)
        DEFINE_SLOW_OP(put_by_id_with_this)
        DEFINE_SLOW_OP(put_by_val_with_this)
        DEFINE_SLOW_OP(resolve_scope_for_hoisting_func_decl_in_eval)
        DEFINE_SLOW_OP(define_data_property)
        DEFINE_SLOW_OP(define_accessor_property)
        DEFINE_SLOW_OP(unreachable)
        DEFINE_SLOW_OP(throw_static_error)
        DEFINE_SLOW_OP(new_array_with_spread)
        DEFINE_SLOW_OP(new_array_buffer)
        DEFINE_SLOW_OP(spread)
        DEFINE_SLOW_OP(get_enumerable_length)
        DEFINE_SLOW_OP(has_generic_property)
        DEFINE_SLOW_OP(get_property_enumerator)
        DEFINE_SLOW_OP(to_index_string)
        DEFINE_SLOW_OP(create_direct_arguments)
        DEFINE_SLOW_OP(create_scoped_arguments)
        DEFINE_SLOW_OP(create_cloned_arguments)
        DEFINE_SLOW_OP(create_rest)
        DEFINE_SLOW_OP(pow)

        DEFINE_OP(op_add)
        DEFINE_OP(op_bitnot)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_tail_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_tail_call_varargs)
        DEFINE_OP(op_tail_call_forward_arguments)
        DEFINE_OP(op_construct_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_to_this)
        DEFINE_OP(op_get_argument)
        DEFINE_OP(op_argument_count)
        DEFINE_OP(op_get_rest_length)
        DEFINE_OP(op_check_tdz)
        DEFINE_OP(op_identity_with_profile)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_del_by_val)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_get_scope)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        DEFINE_OP(op_below)
        DEFINE_OP(op_beloweq)
        DEFINE_OP(op_try_get_by_id)
        DEFINE_OP(op_in_by_id)
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_by_id_with_this)
        DEFINE_OP(op_get_by_id_direct)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_overrides_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_instanceof_custom)
        DEFINE_OP(op_is_empty)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_undefined_or_null)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_object)
        DEFINE_OP(op_is_cell_with_type)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jeq)
        DEFINE_OP(op_jneq)
        DEFINE_OP(op_jstricteq)
        DEFINE_OP(op_jnstricteq)
        DEFINE_OP(op_jbelow)
        DEFINE_OP(op_jbeloweq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_check_traps)
        DEFINE_OP(op_nop)
        DEFINE_OP(op_super_sampler_begin)
        DEFINE_OP(op_super_sampler_end)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
        DEFINE_OP(op_negate)
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_generator_func)
        DEFINE_OP(op_new_generator_func_exp)
        DEFINE_OP(op_new_async_func)
        DEFINE_OP(op_new_async_func_exp)
        DEFINE_OP(op_new_async_generator_func)
        DEFINE_OP(op_new_async_generator_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_dec)
        DEFINE_OP(op_inc)
        DEFINE_OP(op_profile_type)
        DEFINE_OP(op_profile_control_flow)
        DEFINE_OP(op_get_parent_scope)
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_val_direct)
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_by_id)
        DEFINE_OP(op_put_setter_by_id)
        DEFINE_OP(op_put_getter_setter_by_id)
        DEFINE_OP(op_put_getter_by_val)
        DEFINE_OP(op_put_setter_by_val)

        DEFINE_OP(op_ret)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_unsigned)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_set_function_name)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_string)
        DEFINE_OP(op_to_object)
        DEFINE_OP(op_to_primitive)

        DEFINE_OP(op_resolve_scope)
        DEFINE_OP(op_get_from_scope)
        DEFINE_OP(op_put_to_scope)
        DEFINE_OP(op_get_from_arguments)
        DEFINE_OP(op_put_to_arguments)

        DEFINE_OP(op_has_structure_property)
        DEFINE_OP(op_has_indexed_property)
        DEFINE_OP(op_get_direct_pname)
        DEFINE_OP(op_enumerator_structure_pname)
        DEFINE_OP(op_enumerator_generic_pname)

        DEFINE_OP(op_log_shadow_chicken_prologue)
        DEFINE_OP(op_log_shadow_chicken_tail)
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        if (JITInternal::verbose)
            dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n");
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}

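// Slow-case pass: emits the out-of-line code for every slow case recorded by the main pass,
// linking each instruction's group of slow-case jumps and ending with a jump back to the fast path.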
void JIT::privateCompileSlowCases()
{
    m_getByIdIndex = 0;
    m_getByIdWithThisIndex = 0;
    m_putByIdIndex = 0;
    m_inByIdIndex = 0;
    m_instanceOfIndex = 0;
    m_byValInstructionIndex = 0;
    m_callLinkInfoIndex = 0;

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
        m_bytecodeOffset = iter->to;

        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));

        unsigned firstTo = m_bytecodeOffset;

        const Instruction* currentInstruction = m_codeBlock->instructions().at(m_bytecodeOffset).ptr();

        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);

        if (JITInternal::verbose)
            dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label());

#if ENABLE(MASM_PROBE)
        if (UNLIKELY(Options::traceBaselineJITExecution())) {
            OpcodeID opcodeID = currentInstruction->opcodeID();
            unsigned bytecodeOffset = m_bytecodeOffset;
            CodeBlock* codeBlock = m_codeBlock;
            probe([=] (Probe::Context& ctx) {
                dataLogLn("JIT [", bytecodeOffset, "] SLOW ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock);
            });
        }
#endif

        switch (currentInstruction->opcodeID()) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_tail_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_tail_call_varargs)
        DEFINE_SLOWCASE_OP(op_tail_call_forward_arguments)
        DEFINE_SLOWCASE_OP(op_construct_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_try_get_by_id)
        DEFINE_SLOWCASE_OP(op_in_by_id)
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_by_id_with_this)
        DEFINE_SLOWCASE_OP(op_get_by_id_direct)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_instanceof_custom)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jeq)
        DEFINE_SLOWCASE_OP(op_jneq)
        DEFINE_SLOWCASE_OP(op_jstricteq)
        DEFINE_SLOWCASE_OP(op_jnstricteq)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_check_traps)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_put_by_id)
        case op_put_by_val_direct:
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_has_indexed_property)
        DEFINE_SLOWCASE_OP(op_get_from_scope)
        DEFINE_SLOWCASE_OP(op_put_to_scope)

        DEFINE_SLOWCASE_SLOW_OP(unsigned)
        DEFINE_SLOWCASE_SLOW_OP(inc)
        DEFINE_SLOWCASE_SLOW_OP(dec)
        DEFINE_SLOWCASE_SLOW_OP(bitnot)
        DEFINE_SLOWCASE_SLOW_OP(bitand)
        DEFINE_SLOWCASE_SLOW_OP(bitor)
        DEFINE_SLOWCASE_SLOW_OP(bitxor)
        DEFINE_SLOWCASE_SLOW_OP(lshift)
        DEFINE_SLOWCASE_SLOW_OP(rshift)
        DEFINE_SLOWCASE_SLOW_OP(urshift)
        DEFINE_SLOWCASE_SLOW_OP(div)
        DEFINE_SLOWCASE_SLOW_OP(create_this)
        DEFINE_SLOWCASE_SLOW_OP(to_this)
        DEFINE_SLOWCASE_SLOW_OP(to_primitive)
        DEFINE_SLOWCASE_SLOW_OP(to_number)
        DEFINE_SLOWCASE_SLOW_OP(to_string)
        DEFINE_SLOWCASE_SLOW_OP(to_object)
        DEFINE_SLOWCASE_SLOW_OP(not)
        DEFINE_SLOWCASE_SLOW_OP(stricteq)
        DEFINE_SLOWCASE_SLOW_OP(nstricteq)
        DEFINE_SLOWCASE_SLOW_OP(get_direct_pname)
        DEFINE_SLOWCASE_SLOW_OP(has_structure_property)
        DEFINE_SLOWCASE_SLOW_OP(resolve_scope)
        DEFINE_SLOWCASE_SLOW_OP(check_tdz)

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        if (JITInternal::verbose)
            dataLog("At ", firstTo, " slow: ", iter - m_slowCases.begin(), "\n");

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));

        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
    RELEASE_ASSERT(m_getByIdWithThisIndex == m_getByIdsWithThis.size());
    RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
    RELEASE_ASSERT(m_inByIdIndex == m_inByIds.size());
    RELEASE_ASSERT(m_instanceOfIndex == m_instanceOfs.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}

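// Generates the complete baseline code for the code block (prologue, stack check, argument
// profiling, main pass, slow cases, arity check, exception handlers) and leaves the result in an
// unlinked LinkBuffer for link() to finalize.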
void JIT::compileWithoutLinking(JITCompilationEffort effort)
{
    MonotonicTime before { };
    if (UNLIKELY(computeCompileTimes()))
        before = MonotonicTime::now();

    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::CanCompile:
    case DFG::CanCompileAndInline:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    switch (m_codeBlock->codeType()) {
    case GlobalCode:
    case ModuleCode:
    case EvalCode:
        m_codeBlock->m_shouldAlwaysBeInlined = false;
        break;
    case FunctionCode:
        // We could have already set it to false because we detected an uninlineable call.
        // Don't override that observation.
        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
        break;
    }

    if (UNLIKELY(Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler())))
        m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock);
    if (UNLIKELY(m_vm->m_perBytecodeProfiler)) {
        m_compilation = adoptRef(
            new Profiler::Compilation(
                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                Profiler::Baseline));
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(0, nullptr));

    Label entryLabel(this);
    if (m_disassembler)
        m_disassembler->setStartOfCode(entryLabel);

    // Just add a little bit of randomness to the codegen
    if (random() & 1)
        nop();

    emitFunctionPrologue();
    emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    int frameTopOffset = stackPointerOffsetFor(m_codeBlock) * sizeof(Register);
    unsigned maxFrameSize = -frameTopOffset;
    addPtr(TrustedImm32(frameTopOffset), callFrameRegister, regT1);
    JumpList stackOverflow;
    if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
        stackOverflow.append(branchPtr(Above, regT1, callFrameRegister));
    stackOverflow.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), regT1));

    move(regT1, stackPointerRegister);
    checkStackPointerAlignment();
    if (Options::zeroStackFrame())
        clearStackFrame(callFrameRegister, stackPointerRegister, regT0, maxFrameSize);

    emitSaveCalleeSaves();
    emitMaterializeTagCheckRegisters();

    if (m_codeBlock->codeType() == FunctionCode) {
        ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max());
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->isConstructor() && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }
    }

    RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType()));

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());
    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    stackOverflow.link(this);
    m_bytecodeOffset = 0;
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // If the number of parameters is 1, we never require arity fixup.
    bool requiresArityFixup = m_codeBlock->m_numParameters != 1;
    if (m_codeBlock->codeType() == FunctionCode && requiresArityFixup) {
        m_arityCheck = label();
        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
        emitFunctionPrologue();
        emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);

        load32(payloadFor(CallFrameSlot::argumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;

        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(m_codeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck);
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
        branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this);
        move(returnValueGPR, GPRInfo::argumentGPR0);
        emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).retaggedCode<NoPtrTag>());

#if !ASSERT_DISABLED
        m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    } else
        m_arityCheck = entryLabel; // Never require arity fixup.

    ASSERT(m_jmpTable.isEmpty());

    privateCompileExceptionHandlers();

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());
    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    m_linkBuffer = std::unique_ptr<LinkBuffer>(new LinkBuffer(*this, m_codeBlock, effort));

    MonotonicTime after { };
    if (UNLIKELY(computeCompileTimes())) {
        after = MonotonicTime::now();

        if (Options::reportTotalCompileTimes())
            totalBaselineCompileTime += after - before;
    }
    if (UNLIKELY(reportCompileTimes())) {
        CString codeBlockName = toCString(*m_codeBlock);

        dataLog("Optimized ", codeBlockName, " with Baseline JIT into ", m_linkBuffer->size(), " bytes in ", (after - before).milliseconds(), " ms.\n");
    }
}

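// Finalizes the code produced by compileWithoutLinking(): resolves switch tables, exception
// handlers, inline caches, by-val infos, and call link infos against the generated code's
// addresses, then installs the finished JITCode on the CodeBlock.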
CompilationResult JIT::link()
{
    LinkBuffer& patchBuffer = *m_linkBuffer;

    if (patchBuffer.didFailToAllocate())
        return CompilationFailed;

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (auto& record : m_switches) {
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            auto* simpleJumpTable = record.jumpTable.simpleJumpTable;
            simpleJumpTable->ctiDefault = patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                simpleJumpTable->ctiOffsets[j] = offset
                    ? patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + offset])
                    : simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            auto* stringJumpTable = record.jumpTable.stringJumpTable;
            stringJumpTable->ctiDefault =
                patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]);

            for (auto& location : stringJumpTable->offsetTable.values()) {
                unsigned offset = location.branchOffset;
                location.ctiOffset = offset
                    ? patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + offset])
                    : stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        // FIXME: <rdar://problem/39433318>.
        handler.nativeCode = patchBuffer.locationOf<ExceptionHandlerPtrTag>(m_labels[handler.target]);
    }

    for (auto& record : m_calls) {
        if (record.callee)
            patchBuffer.link(record.from, record.callee);
    }

    finalizeInlineCaches(m_getByIds, patchBuffer);
    finalizeInlineCaches(m_getByIdsWithThis, patchBuffer);
    finalizeInlineCaches(m_putByIds, patchBuffer);
    finalizeInlineCaches(m_inByIds, patchBuffer);
    finalizeInlineCaches(m_instanceOfs, patchBuffer);

    if (m_byValCompilationInfo.size()) {
        CodeLocationLabel<ExceptionHandlerPtrTag> exceptionHandler = patchBuffer.locationOf<ExceptionHandlerPtrTag>(m_exceptionHandler);

        for (const auto& byValCompilationInfo : m_byValCompilationInfo) {
            PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump;
            auto notIndexJump = CodeLocationJump<JSInternalPtrTag>();
            if (Jump(patchableNotIndexJump).isSet())
                notIndexJump = CodeLocationJump<JSInternalPtrTag>(patchBuffer.locationOf<JSInternalPtrTag>(patchableNotIndexJump));
            auto badTypeJump = CodeLocationJump<JSInternalPtrTag>(patchBuffer.locationOf<JSInternalPtrTag>(byValCompilationInfo.badTypeJump));
            auto doneTarget = CodeLocationLabel<JSInternalPtrTag>(patchBuffer.locationOf<JSInternalPtrTag>(byValCompilationInfo.doneTarget));
            auto nextHotPathTarget = CodeLocationLabel<JSInternalPtrTag>(patchBuffer.locationOf<JSInternalPtrTag>(byValCompilationInfo.nextHotPathTarget));
            auto slowPathTarget = CodeLocationLabel<JSInternalPtrTag>(patchBuffer.locationOf<JSInternalPtrTag>(byValCompilationInfo.slowPathTarget));

            *byValCompilationInfo.byValInfo = ByValInfo(
                byValCompilationInfo.bytecodeIndex,
                notIndexJump,
                badTypeJump,
                exceptionHandler,
                byValCompilationInfo.arrayMode,
                byValCompilationInfo.arrayProfile,
                doneTarget,
                nextHotPathTarget,
                slowPathTarget);
        }
    }

    for (auto& compilationInfo : m_callCompilationInfo) {
        CallLinkInfo& info = *compilationInfo.callLinkInfo;
        info.setCallLocations(
            CodeLocationLabel<JSInternalPtrTag>(patchBuffer.locationOfNearCall<JSInternalPtrTag>(compilationInfo.callReturnLocation)),
            CodeLocationLabel<JSInternalPtrTag>(patchBuffer.locationOf<JSInternalPtrTag>(compilationInfo.hotPathBegin)),
            patchBuffer.locationOfNearCall<JSInternalPtrTag>(compilationInfo.hotPathOther));
    }

    JITCodeMap jitCodeMap;
    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
        if (m_labels[bytecodeOffset].isSet())
            jitCodeMap.append(bytecodeOffset, patchBuffer.locationOf<JSEntryPtrTag>(m_labels[bytecodeOffset]));
    }
    jitCodeMap.finish();
    m_codeBlock->setJITCodeMap(WTFMove(jitCodeMap));

    MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = patchBuffer.locationOf<JSEntryPtrTag>(m_arityCheck);

    if (Options::dumpDisassembly()) {
        m_disassembler->dump(patchBuffer);
        patchBuffer.didAlreadyDisassemble();
    }
    if (UNLIKELY(m_compilation)) {
        if (Options::disassembleBaselineForProfiler())
            m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
        m_vm->m_perBytecodeProfiler->addCompilation(m_codeBlock, *m_compilation);
    }

    if (m_pcToCodeOriginMapBuilder.didBuildMapping())
        m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer));

    CodeRef<JSEntryPtrTag> result = FINALIZE_CODE(
        patchBuffer, JSEntryPtrTag,
        "Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITType::BaselineJIT)).data());

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructionsSize()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
    m_codeBlock->setJITCode(
        adoptRef(*new DirectJITCode(result, withArityCheck, JITType::BaselineJIT)));

    if (JITInternal::verbose)
        dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start().untaggedPtr(), result.executableMemory()->end().untaggedPtr());

    return CompilationSuccessful;
}

CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
    doMainThreadPreparationBeforeCompile();
    compileWithoutLinking(effort);
    return link();
}

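// Emits the shared out-of-line exception paths: one for checks that unwind with a call frame
// rollback (lookupExceptionHandlerFromCallerFrame) and one for ordinary exception checks
// (lookupExceptionHandler). Both spill callee saves to the entry frame buffer and then jump to
// the VM's exception handler.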
void JIT::privateCompileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).

        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(OperationPtrTag), std::numeric_limits<unsigned>::max(), FunctionPtr<OperationPtrTag>(lookupExceptionHandlerFromCallerFrame)));
        jumpToExceptionHandler(*vm());
    }

    if (!m_exceptionChecks.empty() || m_byValCompilationInfo.size()) {
        m_exceptionHandler = label();
        m_exceptionChecks.link(this);

        copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame);

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallRecord(call(OperationPtrTag), std::numeric_limits<unsigned>::max(), FunctionPtr<OperationPtrTag>(lookupExceptionHandler)));
        jumpToExceptionHandler(*vm());
    }
}

void JIT::doMainThreadPreparationBeforeCompile()
{
    // This ensures that we have the most up to date type information when performing typecheck optimizations for op_profile_type.
    if (m_vm->typeProfiler())
        m_vm->typeProfilerLog()->processLogEntries(*m_vm, "Preparing for JIT compilation."_s);
}

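// Frame sizing: frameRegisterCountFor() reserves the code block's callee locals (already rounded
// to the stack alignment, as the ASSERT below checks) plus maxFrameExtentForSlowPathCallInRegisters
// of headroom for slow-path calls; stackPointerOffsetFor() converts that register count into the
// stack pointer's offset from the call frame.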
unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
{
    ASSERT(static_cast<unsigned>(codeBlock->numCalleeLocals()) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->numCalleeLocals())));

    return roundLocalRegisterCountForFramePointerOffset(codeBlock->numCalleeLocals() + maxFrameExtentForSlowPathCallInRegisters);
}

int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
{
    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
}

bool JIT::reportCompileTimes()
{
    return Options::reportCompileTimes() || Options::reportBaselineCompileTimes();
}

bool JIT::computeCompileTimes()
{
    return reportCompileTimes() || Options::reportTotalCompileTimes();
}

HashMap<CString, Seconds> JIT::compileTimeStats()
{
    HashMap<CString, Seconds> result;
    if (Options::reportTotalCompileTimes()) {
        result.add("Total Compile Time", totalBaselineCompileTime + totalDFGCompileTime + totalFTLCompileTime);
        result.add("Baseline Compile Time", totalBaselineCompileTime);
#if ENABLE(DFG_JIT)
        result.add("DFG Compile Time", totalDFGCompileTime);
#if ENABLE(FTL_JIT)
        result.add("FTL Compile Time", totalFTLCompileTime);
        result.add("FTL (DFG) Compile Time", totalFTLDFGCompileTime);
        result.add("FTL (B3) Compile Time", totalFTLB3CompileTime);
#endif // ENABLE(FTL_JIT)
#endif // ENABLE(DFG_JIT)
    }
    return result;
}

Seconds JIT::totalCompileTime()
{
    return totalBaselineCompileTime + totalDFGCompileTime + totalFTLCompileTime;
}

} // namespace JSC

#endif // ENABLE(JIT)