/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)

#include "JIT.h"

#include "BytecodeGraph.h"
#include "BytecodeLivenessAnalysis.h"
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "DFGCapabilities.h"
#include "InterpreterInlines.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSCInlines.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "ModuleProgramCodeBlock.h"
#include "PCToCodeOriginMap.h"
#include "ProbeContext.h"
#include "ProfilerDatabase.h"
#include "ProgramCodeBlock.h"
#include "ResultType.h"
#include "SlowPathCall.h"
#include "StackAlignment.h"
#include "ThunkGenerators.h"
#include "TypeProfilerLog.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/GraphNodeWorklist.h>
#include <wtf/SimpleStats.h>

namespace JSC {
namespace JITInternal {
static constexpr const bool verbose = false;
}

Seconds totalBaselineCompileTime;
Seconds totalDFGCompileTime;
Seconds totalFTLCompileTime;
Seconds totalFTLDFGCompileTime;
Seconds totalFTLB3CompileTime;

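// Repatch the call instruction whose return address is `returnAddress` so that it
// calls `newCalleeFunction` instead; the callee is retagged from a plain C function
// pointer to an operation pointer before being written back into the code.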
void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr<CFunctionPtrTag> newCalleeFunction)
{
    MacroAssembler::repatchCall(
        CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)),
        newCalleeFunction.retagged<OperationPtrTag>());
}

JIT::JIT(VM& vm, CodeBlock* codeBlock, BytecodeIndex loopOSREntryBytecodeIndex)
    : JSInterfaceJIT(&vm, codeBlock)
    , m_interpreter(vm.interpreter)
    , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
    , m_pcToCodeOriginMapBuilder(vm)
    , m_canBeOptimized(false)
    , m_shouldEmitProfiling(false)
    , m_loopOSREntryBytecodeIndex(loopOSREntryBytecodeIndex)
{
}

JIT::~JIT()
{
}

#if ENABLE(DFG_JIT)
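// Entry tier-up check. Bumps the execution counter by the entry increment; while
// the counter is still negative we skip optimization. Once it crosses zero we
// call operationOptimize, which may return an entry point into optimized code:
// a null result means "stay in the baseline code", a non-null result is
// far-jumped to directly.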
void JIT::emitEnterOptimizationCheck()
{
    if (!canBeOptimized())
        return;

    JumpList skipOptimize;

    skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
    ASSERT(!m_bytecodeIndex.offset());

    copyCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm().topEntryFrame);

    callOperation(operationOptimize, &vm(), m_bytecodeIndex.asBits());
    skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
    farJump(returnValueGPR, GPRInfo::callFrameRegister);
    skipOptimize.link(this);
}
#endif

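// emitNotifyWrite guards writes that watchpoints may care about. The fast path
// may fall through only when the set is in the IsInvalidated state (nothing can
// be watching any more); any other state branches to the slow case, which fires
// the watchpoints. When the set is statically known to be null or already
// invalidated, no runtime check is emitted, but an empty Jump is still recorded
// so the slow-case bookkeeping stays in sync.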
void JIT::emitNotifyWrite(WatchpointSet* set)
{
    if (!set || set->state() == IsInvalidated) {
        addSlowCase(Jump());
        return;
    }

    addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated)));
}

void JIT::emitNotifyWrite(GPRReg pointerToSet)
{
    addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
}

void JIT::assertStackPointerOffset()
{
    if (ASSERT_DISABLED)
        return;

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
    Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
    breakpoint();
    ok.link(this);
}

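// The passes below dispatch on opcode through the DEFINE_* macros. As an
// illustration, DEFINE_OP(op_add) roughly expands to:
//
//     case op_add: {
//         if (m_bytecodeIndex >= startBytecodeIndex) {
//             emit_op_add(currentInstruction);
//         }
//         m_bytecodeIndex = BytecodeIndex(m_bytecodeIndex.offset() + currentInstruction->size());
//         break;
//     }
//
// That is, each case emits code for one bytecode (skipping instructions that the
// OSR-entry pruning in privateCompileMainPass() proved unreachable) and then
// advances m_bytecodeIndex past the current instruction. The SLOW_OP variants
// emit a call to the matching slow_path_* C++ function instead of inline code.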
#define NEXT_OPCODE(name) \
    m_bytecodeIndex = BytecodeIndex(m_bytecodeIndex.offset() + currentInstruction->size()); \
    break;

#define DEFINE_SLOW_OP(name) \
    case op_##name: { \
        if (m_bytecodeIndex >= startBytecodeIndex) { \
            JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
            slowPathCall.call(); \
        } \
        NEXT_OPCODE(op_##name); \
    }

#define DEFINE_OP(name) \
    case name: { \
        if (m_bytecodeIndex >= startBytecodeIndex) { \
            emit_##name(currentInstruction); \
        } \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_SLOW_OP(name) \
    case op_##name: { \
        emitSlowCaseCall(currentInstruction, iter, slow_path_##name); \
        NEXT_OPCODE(op_##name); \
    }

void JIT::emitSlowCaseCall(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, SlowPathFunction stub)
{
    linkAllSlowCases(iter);

    JITSlowPathCall slowPathCall(this, currentInstruction, stub);
    slowPathCall.call();
}

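// Baseline code generation runs in three passes over the bytecode:
//   1. privateCompileMainPass() emits the fast path for each (reachable)
//      instruction, recording a label per bytecode offset and a SlowCaseEntry
//      for every branch that needs out-of-line handling.
//   2. privateCompileLinkPass() links recorded intra-function jumps to those
//      labels.
//   3. privateCompileSlowCases() emits the out-of-line slow paths and jumps
//      back into the fast path.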
void JIT::privateCompileMainPass()
{
    if (JITInternal::verbose)
        dataLog("Compiling ", *m_codeBlock, "\n");

    jitAssertTagsInPlace();
    jitAssertArgumentCountSane();

    auto& instructions = m_codeBlock->instructions();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_callLinkInfoIndex = 0;

    VM& vm = m_codeBlock->vm();
    BytecodeIndex startBytecodeIndex(0);
    if (m_loopOSREntryBytecodeIndex && (m_codeBlock->inherits<ProgramCodeBlock>(vm) || m_codeBlock->inherits<ModuleProgramCodeBlock>(vm))) {
        // We can only do this optimization because we execute ProgramCodeBlocks exactly once.
        // This optimization would be invalid otherwise. When the LLInt determines it wants to
        // do OSR entry into the baseline JIT in a loop, it will pass in the bytecode offset it
        // was executing at when it kicked off our compilation. We only need to compile code for
        // anything reachable from that bytecode offset.

        // We only bother building the bytecode graph if it could save time and executable
        // memory. We pick an arbitrary offset where we deem this is profitable.
        if (m_loopOSREntryBytecodeIndex.offset() >= 200) {
            // As a simplification, we don't find all bytecode ranges that are unreachable.
            // Instead, we just find the minimum bytecode offset that is reachable, and
            // compile code from that bytecode offset onwards.

            BytecodeGraph graph(m_codeBlock, m_codeBlock->instructions());
            BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(m_loopOSREntryBytecodeIndex.offset());
            RELEASE_ASSERT(block);

            GraphNodeWorklist<BytecodeBasicBlock*> worklist;
            startBytecodeIndex = BytecodeIndex();
            worklist.push(block);

            while (BytecodeBasicBlock* block = worklist.pop()) {
                startBytecodeIndex = BytecodeIndex(std::min(startBytecodeIndex.offset(), block->leaderOffset()));
                worklist.pushAll(block->successors());

                // Also add catch blocks for bytecodes that throw.
                if (m_codeBlock->numberOfExceptionHandlers()) {
                    for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) {
                        auto instruction = instructions.at(bytecodeOffset);
                        if (auto* handler = m_codeBlock->handlerForBytecodeIndex(BytecodeIndex(bytecodeOffset)))
                            worklist.push(graph.findBasicBlockWithLeaderOffset(handler->target));

                        bytecodeOffset += instruction->size();
                    }
                }
            }
        }
    }

    for (m_bytecodeIndex = BytecodeIndex(0); m_bytecodeIndex.offset() < instructionCount; ) {
        if (m_bytecodeIndex == startBytecodeIndex && startBytecodeIndex.offset() > 0) {
            // We've proven all bytecode instructions up until here are unreachable.
            // Let's ensure that by crashing if it's ever hit.
            breakpoint();
        }

        if (m_disassembler)
            m_disassembler->setForBytecodeMainPath(m_bytecodeIndex.offset(), label());
        const Instruction* currentInstruction = instructions.at(m_bytecodeIndex).ptr();
        ASSERT(currentInstruction->size());

        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeIndex));

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeIndex > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

        m_labels[m_bytecodeIndex.offset()] = label();

        if (JITInternal::verbose)
            dataLogLn("Old JIT emitting code for ", m_bytecodeIndex, " at offset ", (long)debugOffset());

        OpcodeID opcodeID = currentInstruction->opcodeID();

        if (UNLIKELY(m_compilation)) {
            add64(
                TrustedImm32(1),
                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
                    m_compilation->bytecodes(), m_bytecodeIndex)))->address()));
        }

        if (Options::eagerlyUpdateTopCallFrame())
            updateTopCallFrame();

        unsigned bytecodeOffset = m_bytecodeIndex.offset();
#if ENABLE(MASM_PROBE)
        if (UNLIKELY(Options::traceBaselineJITExecution())) {
            CodeBlock* codeBlock = m_codeBlock;
            probe([=] (Probe::Context& ctx) {
                dataLogLn("JIT [", bytecodeOffset, "] ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock);
            });
        }
#endif

        switch (opcodeID) {
        DEFINE_SLOW_OP(in_by_val)
        DEFINE_SLOW_OP(less)
        DEFINE_SLOW_OP(lesseq)
        DEFINE_SLOW_OP(greater)
        DEFINE_SLOW_OP(greatereq)
        DEFINE_SLOW_OP(is_function)
        DEFINE_SLOW_OP(is_object_or_null)
        DEFINE_SLOW_OP(typeof)
        DEFINE_SLOW_OP(strcat)
        DEFINE_SLOW_OP(push_with_scope)
        DEFINE_SLOW_OP(create_lexical_environment)
        DEFINE_SLOW_OP(get_by_val_with_this)
        DEFINE_SLOW_OP(put_by_id_with_this)
        DEFINE_SLOW_OP(put_by_val_with_this)
        DEFINE_SLOW_OP(resolve_scope_for_hoisting_func_decl_in_eval)
        DEFINE_SLOW_OP(define_data_property)
        DEFINE_SLOW_OP(define_accessor_property)
        DEFINE_SLOW_OP(unreachable)
        DEFINE_SLOW_OP(throw_static_error)
        DEFINE_SLOW_OP(new_array_with_spread)
        DEFINE_SLOW_OP(new_array_buffer)
        DEFINE_SLOW_OP(spread)
        DEFINE_SLOW_OP(get_enumerable_length)
        DEFINE_SLOW_OP(has_generic_property)
        DEFINE_SLOW_OP(get_property_enumerator)
        DEFINE_SLOW_OP(to_index_string)
        DEFINE_SLOW_OP(create_direct_arguments)
        DEFINE_SLOW_OP(create_scoped_arguments)
        DEFINE_SLOW_OP(create_cloned_arguments)
        DEFINE_SLOW_OP(create_rest)
        DEFINE_SLOW_OP(create_promise)
        DEFINE_SLOW_OP(new_promise)
        DEFINE_SLOW_OP(create_generator)
        DEFINE_SLOW_OP(create_async_generator)
        DEFINE_SLOW_OP(new_generator)
        DEFINE_SLOW_OP(pow)

        DEFINE_OP(op_add)
        DEFINE_OP(op_bitnot)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_tail_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_tail_call_varargs)
        DEFINE_OP(op_tail_call_forward_arguments)
        DEFINE_OP(op_construct_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_to_this)
        DEFINE_OP(op_get_argument)
        DEFINE_OP(op_argument_count)
        DEFINE_OP(op_get_rest_length)
        DEFINE_OP(op_check_tdz)
        DEFINE_OP(op_identity_with_profile)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_del_by_val)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_get_scope)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        DEFINE_OP(op_below)
        DEFINE_OP(op_beloweq)
        DEFINE_OP(op_try_get_by_id)
        DEFINE_OP(op_in_by_id)
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_by_id_with_this)
        DEFINE_OP(op_get_by_id_direct)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_overrides_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_instanceof_custom)
        DEFINE_OP(op_is_empty)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_undefined_or_null)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_object)
        DEFINE_OP(op_is_cell_with_type)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jundefined_or_null)
        DEFINE_OP(op_jnundefined_or_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jeq)
        DEFINE_OP(op_jneq)
        DEFINE_OP(op_jstricteq)
        DEFINE_OP(op_jnstricteq)
        DEFINE_OP(op_jbelow)
        DEFINE_OP(op_jbeloweq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_check_traps)
        DEFINE_OP(op_nop)
        DEFINE_OP(op_super_sampler_begin)
        DEFINE_OP(op_super_sampler_end)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
        DEFINE_OP(op_negate)
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_with_size)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_generator_func)
        DEFINE_OP(op_new_generator_func_exp)
        DEFINE_OP(op_new_async_func)
        DEFINE_OP(op_new_async_func_exp)
        DEFINE_OP(op_new_async_generator_func)
        DEFINE_OP(op_new_async_generator_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_dec)
        DEFINE_OP(op_inc)
        DEFINE_OP(op_profile_type)
        DEFINE_OP(op_profile_control_flow)
        DEFINE_OP(op_get_parent_scope)
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_val_direct)
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_by_id)
        DEFINE_OP(op_put_setter_by_id)
        DEFINE_OP(op_put_getter_setter_by_id)
        DEFINE_OP(op_put_getter_by_val)
        DEFINE_OP(op_put_setter_by_val)

        DEFINE_OP(op_get_internal_field)
        DEFINE_OP(op_put_internal_field)

        DEFINE_OP(op_ret)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_unsigned)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_set_function_name)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_to_number)
        DEFINE_OP(op_to_numeric)
        DEFINE_OP(op_to_string)
        DEFINE_OP(op_to_object)
        DEFINE_OP(op_to_primitive)

        DEFINE_OP(op_resolve_scope)
        DEFINE_OP(op_get_from_scope)
        DEFINE_OP(op_put_to_scope)
        DEFINE_OP(op_get_from_arguments)
        DEFINE_OP(op_put_to_arguments)

        DEFINE_OP(op_has_structure_property)
        DEFINE_OP(op_has_indexed_property)
        DEFINE_OP(op_get_direct_pname)
        DEFINE_OP(op_enumerator_structure_pname)
        DEFINE_OP(op_enumerator_generic_pname)

        DEFINE_OP(op_log_shadow_chicken_prologue)
        DEFINE_OP(op_log_shadow_chicken_tail)
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        if (JITInternal::verbose)
            dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n");
    }

    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeIndex = BytecodeIndex();
#endif
}

void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}

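// m_slowCases was appended to in bytecode order by the main pass, one entry per
// slow-case branch. Each loop iteration below consumes every entry for a single
// bytecode index (the emitSlow_* helpers and emitSlowCaseCall advance `iter`
// via linkAllSlowCases), emits that instruction's slow path, and jumps back to
// the fast path. The RELEASE_ASSERTs after the switch enforce exactly this:
// at least one entry consumed, and all entries for the index consumed.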
void JIT::privateCompileSlowCases()
{
    m_getByIdIndex = 0;
    m_getByValIndex = 0;
    m_getByIdWithThisIndex = 0;
    m_putByIdIndex = 0;
    m_inByIdIndex = 0;
    m_instanceOfIndex = 0;
    m_byValInstructionIndex = 0;
    m_callLinkInfoIndex = 0;

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
        m_bytecodeIndex = iter->to;

        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeIndex));

        BytecodeIndex firstTo = m_bytecodeIndex;

        const Instruction* currentInstruction = m_codeBlock->instructions().at(m_bytecodeIndex).ptr();

        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeIndex);

        if (JITInternal::verbose)
            dataLogLn("Old JIT emitting slow code for ", m_bytecodeIndex, " at offset ", (long)debugOffset());

        if (m_disassembler)
            m_disassembler->setForBytecodeSlowPath(m_bytecodeIndex.offset(), label());

#if ENABLE(MASM_PROBE)
        if (UNLIKELY(Options::traceBaselineJITExecution())) {
            OpcodeID opcodeID = currentInstruction->opcodeID();
            unsigned bytecodeOffset = m_bytecodeIndex.offset();
            CodeBlock* codeBlock = m_codeBlock;
            probe([=] (Probe::Context& ctx) {
                dataLogLn("JIT [", bytecodeOffset, "] SLOW ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock);
            });
        }
#endif

        switch (currentInstruction->opcodeID()) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_tail_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_tail_call_varargs)
        DEFINE_SLOWCASE_OP(op_tail_call_forward_arguments)
        DEFINE_SLOWCASE_OP(op_construct_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_try_get_by_id)
        DEFINE_SLOWCASE_OP(op_in_by_id)
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_by_id_with_this)
        DEFINE_SLOWCASE_OP(op_get_by_id_direct)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_instanceof_custom)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jeq)
        DEFINE_SLOWCASE_OP(op_jneq)
        DEFINE_SLOWCASE_OP(op_jstricteq)
        DEFINE_SLOWCASE_OP(op_jnstricteq)
        DEFINE_SLOWCASE_OP(op_loop_hint)
        DEFINE_SLOWCASE_OP(op_check_traps)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_put_by_id)
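        // op_put_by_val_direct intentionally falls through: it shares op_put_by_val's slow path.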
        case op_put_by_val_direct:
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_has_indexed_property)
        DEFINE_SLOWCASE_OP(op_get_from_scope)
        DEFINE_SLOWCASE_OP(op_put_to_scope)

        DEFINE_SLOWCASE_SLOW_OP(unsigned)
        DEFINE_SLOWCASE_SLOW_OP(inc)
        DEFINE_SLOWCASE_SLOW_OP(dec)
        DEFINE_SLOWCASE_SLOW_OP(bitnot)
        DEFINE_SLOWCASE_SLOW_OP(bitand)
        DEFINE_SLOWCASE_SLOW_OP(bitor)
        DEFINE_SLOWCASE_SLOW_OP(bitxor)
        DEFINE_SLOWCASE_SLOW_OP(lshift)
        DEFINE_SLOWCASE_SLOW_OP(rshift)
        DEFINE_SLOWCASE_SLOW_OP(urshift)
        DEFINE_SLOWCASE_SLOW_OP(div)
        DEFINE_SLOWCASE_SLOW_OP(create_this)
        DEFINE_SLOWCASE_SLOW_OP(create_promise)
        DEFINE_SLOWCASE_SLOW_OP(create_generator)
        DEFINE_SLOWCASE_SLOW_OP(create_async_generator)
        DEFINE_SLOWCASE_SLOW_OP(to_this)
        DEFINE_SLOWCASE_SLOW_OP(to_primitive)
        DEFINE_SLOWCASE_SLOW_OP(to_number)
        DEFINE_SLOWCASE_SLOW_OP(to_numeric)
        DEFINE_SLOWCASE_SLOW_OP(to_string)
        DEFINE_SLOWCASE_SLOW_OP(to_object)
        DEFINE_SLOWCASE_SLOW_OP(not)
        DEFINE_SLOWCASE_SLOW_OP(stricteq)
        DEFINE_SLOWCASE_SLOW_OP(nstricteq)
        DEFINE_SLOWCASE_SLOW_OP(get_direct_pname)
        DEFINE_SLOWCASE_SLOW_OP(has_structure_property)
        DEFINE_SLOWCASE_SLOW_OP(resolve_scope)
        DEFINE_SLOWCASE_SLOW_OP(check_tdz)

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        if (JITInternal::verbose)
            dataLog("At ", firstTo, " slow: ", iter - m_slowCases.begin(), "\n");

        RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));

        emitJumpSlowToHot(jump(), 0);
    }

    RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
    RELEASE_ASSERT(m_getByIdWithThisIndex == m_getByIdsWithThis.size());
    RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
    RELEASE_ASSERT(m_inByIdIndex == m_inByIds.size());
    RELEASE_ASSERT(m_instanceOfIndex == m_instanceOfs.size());
    RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeIndex = BytecodeIndex();
#endif
}

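// Generate everything short of final linking: derive the optimization and
// profiling policy from the DFG capability level, emit the prologue and stack
// check, run the three compile passes, then emit the arity-check entry point
// and shared exception handlers, leaving the result in m_linkBuffer for link()
// to finalize.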
void JIT::compileWithoutLinking(JITCompilationEffort effort)
{
    MonotonicTime before { };
    if (UNLIKELY(computeCompileTimes()))
        before = MonotonicTime::now();

    DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::CanCompile:
    case DFG::CanCompileAndInline:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    switch (m_codeBlock->codeType()) {
    case GlobalCode:
    case ModuleCode:
    case EvalCode:
        m_codeBlock->m_shouldAlwaysBeInlined = false;
        break;
    case FunctionCode:
        // We could have already set it to false because we detected an uninlineable call.
        // Don't override that observation.
        m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
        break;
    }

    if (UNLIKELY(Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler())))
        m_disassembler = makeUnique<JITDisassembler>(m_codeBlock);
    if (UNLIKELY(m_vm->m_perBytecodeProfiler)) {
        m_compilation = adoptRef(
            new Profiler::Compilation(
                m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
                Profiler::Baseline));
        m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
    }

    m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(BytecodeIndex(0)));

    Label entryLabel(this);
    if (m_disassembler)
        m_disassembler->setStartOfCode(entryLabel);

    // Just add a little bit of randomness to the codegen
    if (random() & 1)
        nop();

    emitFunctionPrologue();
    emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

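    // Compute the prospective new stack pointer (frameTopOffset is negative, so
    // this is callFrameRegister minus the frame size) and compare it against the
    // VM's soft stack limit. For frames bigger than the reserved zone we also
    // check that regT1 did not end up above the frame pointer, presumably to
    // catch arithmetic wraparound on very large frames.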
    int frameTopOffset = stackPointerOffsetFor(m_codeBlock) * sizeof(Register);
    unsigned maxFrameSize = -frameTopOffset;
    addPtr(TrustedImm32(frameTopOffset), callFrameRegister, regT1);
    JumpList stackOverflow;
    if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
        stackOverflow.append(branchPtr(Above, regT1, callFrameRegister));
    stackOverflow.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), regT1));

    move(regT1, stackPointerRegister);
    checkStackPointerAlignment();
    if (Options::zeroStackFrame())
        clearStackFrame(callFrameRegister, stackPointerRegister, regT0, maxFrameSize);

    emitSaveCalleeSaves();
    emitMaterializeTagCheckRegisters();

    if (m_codeBlock->codeType() == FunctionCode) {
        ASSERT(!m_bytecodeIndex);
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->isConstructor() && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                load64(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }
    }

    RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType()));

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    if (m_disassembler)
        m_disassembler->setEndOfSlowPath(label());
    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    stackOverflow.link(this);
    m_bytecodeIndex = BytecodeIndex(0);
    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
    callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

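    // Emit the arity-check entry point, used when a function may be called with
    // the wrong argument count. It re-runs the prologue and, if enough arguments
    // were passed, jumps straight to beginLabel; otherwise it calls the arity
    // check operation and, when that reports a non-zero fixup amount, invokes
    // the arity fixup thunk to pad the frame before entering the function.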
    // If the number of parameters is 1, we never require arity fixup.
    bool requiresArityFixup = m_codeBlock->m_numParameters != 1;
    if (m_codeBlock->codeType() == FunctionCode && requiresArityFixup) {
        m_arityCheck = label();
        store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
        emitFunctionPrologue();
        emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);

        load32(payloadFor(CallFrameSlot::argumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeIndex = BytecodeIndex(0);

        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
        callOperationWithCallFrameRollbackOnException(m_codeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck, m_codeBlock->globalObject());
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
        branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this);
        move(returnValueGPR, GPRInfo::argumentGPR0);
        emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).retaggedCode<NoPtrTag>());

#if !ASSERT_DISABLED
        m_bytecodeIndex = BytecodeIndex(); // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    } else
        m_arityCheck = entryLabel; // Never require arity fixup.

    ASSERT(m_jmpTable.isEmpty());

    privateCompileExceptionHandlers();

    if (m_disassembler)
        m_disassembler->setEndOfCode(label());
    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    m_linkBuffer = std::unique_ptr<LinkBuffer>(new LinkBuffer(*this, m_codeBlock, effort));

    MonotonicTime after { };
    if (UNLIKELY(computeCompileTimes())) {
        after = MonotonicTime::now();

        if (Options::reportTotalCompileTimes())
            totalBaselineCompileTime += after - before;
    }
    if (UNLIKELY(reportCompileTimes())) {
        CString codeBlockName = toCString(*m_codeBlock);

        dataLog("Optimized ", codeBlockName, " with Baseline JIT into ", m_linkBuffer->size(), " bytes in ", (after - before).milliseconds(), " ms.\n");
    }
}

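// Final linking: resolve switch jump tables and exception handlers to machine
// code addresses, bind recorded calls, inline caches, and by-val/call link
// infos, publish the bytecode-to-machine-code map, and install the finished
// code (plus its arity-check entry point) on the CodeBlock.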
CompilationResult JIT::link()
{
    LinkBuffer& patchBuffer = *m_linkBuffer;

    if (patchBuffer.didFailToAllocate())
        return CompilationFailed;

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (auto& record : m_switches) {
        unsigned bytecodeOffset = record.bytecodeIndex.offset();

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            auto* simpleJumpTable = record.jumpTable.simpleJumpTable;
            simpleJumpTable->ctiDefault = patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                simpleJumpTable->ctiOffsets[j] = offset
                    ? patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + offset])
                    : simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            auto* stringJumpTable = record.jumpTable.stringJumpTable;
            stringJumpTable->ctiDefault =
                patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]);

            for (auto& location : stringJumpTable->offsetTable.values()) {
                unsigned offset = location.branchOffset;
                location.ctiOffset = offset
                    ? patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + offset])
                    : stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        // FIXME: <rdar://problem/39433318>.
        handler.nativeCode = patchBuffer.locationOf<ExceptionHandlerPtrTag>(m_labels[handler.target]);
    }

    for (auto& record : m_calls) {
        if (record.callee)
            patchBuffer.link(record.from, record.callee);
    }

    finalizeInlineCaches(m_getByIds, patchBuffer);
    finalizeInlineCaches(m_getByVals, patchBuffer);
    finalizeInlineCaches(m_getByIdsWithThis, patchBuffer);
    finalizeInlineCaches(m_putByIds, patchBuffer);
    finalizeInlineCaches(m_inByIds, patchBuffer);
    finalizeInlineCaches(m_instanceOfs, patchBuffer);

    if (m_byValCompilationInfo.size()) {
        CodeLocationLabel<ExceptionHandlerPtrTag> exceptionHandler = patchBuffer.locationOf<ExceptionHandlerPtrTag>(m_exceptionHandler);

        for (const auto& byValCompilationInfo : m_byValCompilationInfo) {
            PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump;
            auto notIndexJump = CodeLocationJump<JSInternalPtrTag>();
            if (Jump(patchableNotIndexJump).isSet())
                notIndexJump = CodeLocationJump<JSInternalPtrTag>(patchBuffer.locationOf<JSInternalPtrTag>(patchableNotIndexJump));
            auto badTypeJump = CodeLocationJump<JSInternalPtrTag>(patchBuffer.locationOf<JSInternalPtrTag>(byValCompilationInfo.badTypeJump));
            auto doneTarget = CodeLocationLabel<JSInternalPtrTag>(patchBuffer.locationOf<JSInternalPtrTag>(byValCompilationInfo.doneTarget));
            auto nextHotPathTarget = CodeLocationLabel<JSInternalPtrTag>(patchBuffer.locationOf<JSInternalPtrTag>(byValCompilationInfo.nextHotPathTarget));
            auto slowPathTarget = CodeLocationLabel<JSInternalPtrTag>(patchBuffer.locationOf<JSInternalPtrTag>(byValCompilationInfo.slowPathTarget));

            *byValCompilationInfo.byValInfo = ByValInfo(
                byValCompilationInfo.bytecodeIndex,
                notIndexJump,
                badTypeJump,
                exceptionHandler,
                byValCompilationInfo.arrayMode,
                byValCompilationInfo.arrayProfile,
                doneTarget,
                nextHotPathTarget,
                slowPathTarget);
        }
    }

    for (auto& compilationInfo : m_callCompilationInfo) {
        CallLinkInfo& info = *compilationInfo.callLinkInfo;
        info.setCallLocations(
            CodeLocationLabel<JSInternalPtrTag>(patchBuffer.locationOfNearCall<JSInternalPtrTag>(compilationInfo.callReturnLocation)),
            CodeLocationLabel<JSInternalPtrTag>(patchBuffer.locationOf<JSInternalPtrTag>(compilationInfo.hotPathBegin)),
            patchBuffer.locationOfNearCall<JSInternalPtrTag>(compilationInfo.hotPathOther));
    }

    JITCodeMap jitCodeMap;
    for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
        if (m_labels[bytecodeOffset].isSet())
            jitCodeMap.append(BytecodeIndex(bytecodeOffset), patchBuffer.locationOf<JSEntryPtrTag>(m_labels[bytecodeOffset]));
    }
    jitCodeMap.finish();
    m_codeBlock->setJITCodeMap(WTFMove(jitCodeMap));

    MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = patchBuffer.locationOf<JSEntryPtrTag>(m_arityCheck);

    if (Options::dumpDisassembly()) {
        m_disassembler->dump(patchBuffer);
        patchBuffer.didAlreadyDisassemble();
    }
    if (UNLIKELY(m_compilation)) {
        if (Options::disassembleBaselineForProfiler())
            m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
        m_vm->m_perBytecodeProfiler->addCompilation(m_codeBlock, *m_compilation);
    }

    if (m_pcToCodeOriginMapBuilder.didBuildMapping())
        m_codeBlock->setPCToCodeOriginMap(makeUnique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer));

    CodeRef<JSEntryPtrTag> result = FINALIZE_CODE(
        patchBuffer, JSEntryPtrTag,
        "Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITType::BaselineJIT)).data());

    m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructionsSize()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
    m_codeBlock->setJITCode(
        adoptRef(*new DirectJITCode(result, withArityCheck, JITType::BaselineJIT)));

    if (JITInternal::verbose)
        dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start().untaggedPtr(), result.executableMemory()->end().untaggedPtr());

    return CompilationSuccessful;
}

CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
    doMainThreadPreparationBeforeCompile();
    compileWithoutLinking(effort);
    return link();
}

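// Emit the two shared exception-handling tails: one for checks that must first
// roll back the call frame (used, e.g., by the stack-overflow and arity-check
// calls above) and one for ordinary exception checks. Both copy the callee
// saves into the entry frame buffer, look up the handler with the VM* as the
// sole argument, and jump to it.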
void JIT::privateCompileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm().topEntryFrame);

        // operationLookupExceptionHandlerFromCallerFrame is passed one argument, the VM*.
        move(TrustedImmPtr(&vm()), GPRInfo::argumentGPR0);
        prepareCallOperation(vm());
        m_calls.append(CallRecord(call(OperationPtrTag), BytecodeIndex(), FunctionPtr<OperationPtrTag>(operationLookupExceptionHandlerFromCallerFrame)));
        jumpToExceptionHandler(vm());
    }

    if (!m_exceptionChecks.empty() || m_byValCompilationInfo.size()) {
        m_exceptionHandler = label();
        m_exceptionChecks.link(this);

        copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm().topEntryFrame);

        // operationLookupExceptionHandler is passed one argument, the VM*.
        move(TrustedImmPtr(&vm()), GPRInfo::argumentGPR0);
        prepareCallOperation(vm());
        m_calls.append(CallRecord(call(OperationPtrTag), BytecodeIndex(), FunctionPtr<OperationPtrTag>(operationLookupExceptionHandler)));
        jumpToExceptionHandler(vm());
    }
}

void JIT::doMainThreadPreparationBeforeCompile()
{
    // This ensures that we have the most up-to-date type information when performing typecheck optimizations for op_profile_type.
    if (m_vm->typeProfiler())
        m_vm->typeProfilerLog()->processLogEntries(*m_vm, "Preparing for JIT compilation."_s);
}

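// The baseline frame reserves the code block's callee locals (which the ASSERT
// checks are already rounded to the stack alignment) plus headroom for the
// largest slow-path call, rounded to keep the frame pointer offset valid.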
unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
{
    ASSERT(static_cast<unsigned>(codeBlock->numCalleeLocals()) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->numCalleeLocals())));

    return roundLocalRegisterCountForFramePointerOffset(codeBlock->numCalleeLocals() + maxFrameExtentForSlowPathCallInRegisters);
}

int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
{
    return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
}

bool JIT::reportCompileTimes()
{
    return Options::reportCompileTimes() || Options::reportBaselineCompileTimes();
}

bool JIT::computeCompileTimes()
{
    return reportCompileTimes() || Options::reportTotalCompileTimes();
}

HashMap<CString, Seconds> JIT::compileTimeStats()
{
    HashMap<CString, Seconds> result;
    if (Options::reportTotalCompileTimes()) {
        result.add("Total Compile Time", totalBaselineCompileTime + totalDFGCompileTime + totalFTLCompileTime);
        result.add("Baseline Compile Time", totalBaselineCompileTime);
#if ENABLE(DFG_JIT)
        result.add("DFG Compile Time", totalDFGCompileTime);
#if ENABLE(FTL_JIT)
        result.add("FTL Compile Time", totalFTLCompileTime);
        result.add("FTL (DFG) Compile Time", totalFTLDFGCompileTime);
        result.add("FTL (B3) Compile Time", totalFTLB3CompileTime);
#endif // ENABLE(FTL_JIT)
#endif // ENABLE(DFG_JIT)
    }
    return result;
}

Seconds JIT::totalCompileTime()
{
    return totalBaselineCompileTime + totalDFGCompileTime + totalFTLCompileTime;
}

} // namespace JSC

#endif // ENABLE(JIT)