/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "CodeBlock.h"

#include "ArithProfile.h"
#include "BasicBlockLocation.h"
#include "BytecodeDumper.h"
#include "BytecodeGenerator.h"
#include "BytecodeLivenessAnalysis.h"
#include "BytecodeStructs.h"
#include "BytecodeUseDef.h"
#include "CallLinkStatus.h"
#include "CodeBlockInlines.h"
#include "CodeBlockSet.h"
#include "DFGCapabilities.h"
#include "DFGCommon.h"
#include "DFGDriver.h"
#include "DFGJITCode.h"
#include "DFGWorklist.h"
#include "Debugger.h"
#include "EvalCodeBlock.h"
#include "FullCodeOrigin.h"
#include "FunctionCodeBlock.h"
#include "FunctionExecutableDump.h"
#include "GetPutInfo.h"
#include "InlineCallFrame.h"
#include "Instruction.h"
#include "InstructionStream.h"
#include "InterpreterInlines.h"
#include "IsoCellSetInlines.h"
#include "JIT.h"
#include "JITMathIC.h"
#include "JSBigInt.h"
#include "JSCInlines.h"
#include "JSCJSValue.h"
#include "JSFunction.h"
#include "JSLexicalEnvironment.h"
#include "JSModuleEnvironment.h"
#include "JSSet.h"
#include "JSString.h"
#include "JSTemplateObjectDescriptor.h"
#include "LLIntData.h"
#include "LLIntEntrypoint.h"
#include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"
#include "LowLevelInterpreter.h"
#include "MetadataTable.h"
#include "ModuleProgramCodeBlock.h"
#include "ObjectAllocationProfileInlines.h"
#include "OpcodeInlines.h"
#include "PCToCodeOriginMap.h"
#include "PolymorphicAccess.h"
#include "ProfilerDatabase.h"
#include "ProgramCodeBlock.h"
#include "ReduceWhitespace.h"
#include "Repatch.h"
#include "SlotVisitorInlines.h"
#include "StackVisitor.h"
#include "StructureStubInfo.h"
#include "TypeLocationCache.h"
#include "TypeProfiler.h"
#include "VMInlines.h"
#include <wtf/BagToHashMap.h>
#include <wtf/CommaPrinter.h>
#include <wtf/Forward.h>
#include <wtf/SimpleStats.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringConcatenateNumbers.h>
#include <wtf/text/UniquedStringImpl.h>

#if ENABLE(ASSEMBLER)
#include "RegisterAtOffsetList.h"
#endif

#if ENABLE(DFG_JIT)
#include "DFGOperations.h"
#endif

#if ENABLE(FTL_JIT)
#include "FTLJITCode.h"
#endif

namespace JSC {

const ClassInfo CodeBlock::s_info = {
    "CodeBlock", nullptr, nullptr, nullptr,
    CREATE_METHOD_TABLE(CodeBlock)
};

CString CodeBlock::inferredName() const
{
    switch (codeType()) {
    case GlobalCode:
        return "<global>";
    case EvalCode:
        return "<eval>";
    case FunctionCode:
        return jsCast<FunctionExecutable*>(ownerExecutable())->ecmaName().utf8();
    case ModuleCode:
        return "<module>";
    default:
        CRASH();
        return CString("", 0);
    }
}

bool CodeBlock::hasHash() const
{
    return !!m_hash;
}

bool CodeBlock::isSafeToComputeHash() const
{
    return !isCompilationThread();
}

CodeBlockHash CodeBlock::hash() const
{
    if (!m_hash) {
        RELEASE_ASSERT(isSafeToComputeHash());
        m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
    }
    return m_hash;
}

CString CodeBlock::sourceCodeForTools() const
{
    if (codeType() != FunctionCode)
        return ownerExecutable()->source().toUTF8();

    SourceProvider* provider = source().provider();
    FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
    UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
    unsigned unlinkedStartOffset = unlinked->startOffset();
    unsigned linkedStartOffset = executable->source().startOffset();
    int delta = linkedStartOffset - unlinkedStartOffset;
    unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
    unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
    return toCString(
        "function ",
        provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8());
}

CString CodeBlock::sourceCodeOnOneLine() const
{
    return reduceWhitespace(sourceCodeForTools());
}

CString CodeBlock::hashAsStringIfPossible() const
{
    if (hasHash() || isSafeToComputeHash())
        return toCString(hash());
    return "<no-hash>";
}

void CodeBlock::dumpAssumingJITType(PrintStream& out, JITType jitType) const
{
    out.print(inferredName(), "#", hashAsStringIfPossible());
    out.print(":[", RawPointer(this), "->");
    if (!!m_alternative)
        out.print(RawPointer(alternative()), "->");
    out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());

    if (codeType() == FunctionCode)
        out.print(specializationKind());
    out.print(", ", instructionsSize());
    if (this->jitType() == JITType::BaselineJIT && m_shouldAlwaysBeInlined)
        out.print(" (ShouldAlwaysBeInlined)");
    if (ownerExecutable()->neverInline())
        out.print(" (NeverInline)");
    if (ownerExecutable()->neverOptimize())
        out.print(" (NeverOptimize)");
    else if (ownerExecutable()->neverFTLOptimize())
        out.print(" (NeverFTLOptimize)");
    if (ownerExecutable()->didTryToEnterInLoop())
        out.print(" (DidTryToEnterInLoop)");
    if (ownerExecutable()->isStrictMode())
        out.print(" (StrictMode)");
    if (m_didFailJITCompilation)
        out.print(" (JITFail)");
    if (this->jitType() == JITType::BaselineJIT && m_didFailFTLCompilation)
        out.print(" (FTLFail)");
    if (this->jitType() == JITType::BaselineJIT && m_hasBeenCompiledWithFTL)
        out.print(" (HadFTLReplacement)");
    out.print("]");
}

void CodeBlock::dump(PrintStream& out) const
{
    dumpAssumingJITType(out, jitType());
}

void CodeBlock::dumpSource()
{
    dumpSource(WTF::dataFile());
}

void CodeBlock::dumpSource(PrintStream& out)
{
    ScriptExecutable* executable = ownerExecutable();
    if (executable->isFunctionExecutable()) {
        FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable);
        StringView source = functionExecutable->source().provider()->getRange(
            functionExecutable->parametersStartOffset(),
            functionExecutable->typeProfilingEndOffset(*vm()) + 1); // Type profiling end offset is the character before the '}'.

        out.print("function ", inferredName(), source);
        return;
    }
    out.print(executable->source().view());
}

void CodeBlock::dumpBytecode()
{
    dumpBytecode(WTF::dataFile());
}

void CodeBlock::dumpBytecode(PrintStream& out)
{
    ICStatusMap statusMap;
    getICStatusMap(statusMap);
    BytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, statusMap);
}

void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap)
{
    BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap);
}

void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap)
{
    const auto it = instructions().at(bytecodeOffset);
    dumpBytecode(out, it, statusMap);
}

namespace {

class PutToScopeFireDetail : public FireDetail {
public:
    PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident)
        : m_codeBlock(codeBlock)
        , m_ident(ident)
    {
    }

    void dump(PrintStream& out) const override
    {
        out.print("Linking put_to_scope in ", FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for ", m_ident);
    }

private:
    CodeBlock* m_codeBlock;
    const Identifier& m_ident;
};

} // anonymous namespace

CodeBlock::CodeBlock(VM* vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other)
    : JSCell(*vm, structure)
    , m_globalObject(other.m_globalObject)
    , m_shouldAlwaysBeInlined(true)
#if ENABLE(JIT)
    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
#endif
    , m_didFailJITCompilation(false)
    , m_didFailFTLCompilation(false)
    , m_hasBeenCompiledWithFTL(false)
    , m_numCalleeLocals(other.m_numCalleeLocals)
    , m_numVars(other.m_numVars)
    , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
    , m_hasDebuggerStatement(false)
    , m_steppingMode(SteppingModeDisabled)
    , m_numBreakpoints(0)
    , m_bytecodeCost(other.m_bytecodeCost)
    , m_scopeRegister(other.m_scopeRegister)
    , m_hash(other.m_hash)
    , m_unlinkedCode(*other.vm(), this, other.m_unlinkedCode.get())
    , m_ownerExecutable(*other.vm(), this, other.m_ownerExecutable.get())
    , m_vm(other.m_vm)
    , m_instructionsRawPointer(other.m_instructionsRawPointer)
    , m_constantRegisters(other.m_constantRegisters)
    , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation)
    , m_functionDecls(other.m_functionDecls)
    , m_functionExprs(other.m_functionExprs)
    , m_osrExitCounter(0)
    , m_optimizationDelayCounter(0)
    , m_reoptimizationRetryCounter(0)
    , m_metadata(other.m_metadata)
    , m_creationTime(MonotonicTime::now())
{
    ASSERT(heap()->isDeferred());
    ASSERT(m_scopeRegister.isLocal());

    ASSERT(source().provider());
    setNumParameters(other.numParameters());

    vm->heap.codeBlockSet().add(this);
}

void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other)
{
    Base::finishCreation(vm);
    finishCreationCommon(vm);

    optimizeAfterWarmUp();
    jitAfterWarmUp();

    if (other.m_rareData) {
        createRareDataIfNecessary();

        m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
        m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
        m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
    }
}

CodeBlock::CodeBlock(VM* vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope)
    : JSCell(*vm, structure)
    , m_globalObject(*vm, this, scope->globalObject(*vm))
    , m_shouldAlwaysBeInlined(true)
#if ENABLE(JIT)
    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
#endif
    , m_didFailJITCompilation(false)
    , m_didFailFTLCompilation(false)
    , m_hasBeenCompiledWithFTL(false)
    , m_numCalleeLocals(unlinkedCodeBlock->numCalleeLocals())
    , m_numVars(unlinkedCodeBlock->numVars())
    , m_hasDebuggerStatement(false)
    , m_steppingMode(SteppingModeDisabled)
    , m_numBreakpoints(0)
    , m_scopeRegister(unlinkedCodeBlock->scopeRegister())
    , m_unlinkedCode(*vm, this, unlinkedCodeBlock)
    , m_ownerExecutable(*vm, this, ownerExecutable)
    , m_vm(vm)
    , m_instructionsRawPointer(unlinkedCodeBlock->instructions().rawPointer())
    , m_osrExitCounter(0)
    , m_optimizationDelayCounter(0)
    , m_reoptimizationRetryCounter(0)
    , m_metadata(unlinkedCodeBlock->metadata().link())
    , m_creationTime(MonotonicTime::now())
{
    ASSERT(heap()->isDeferred());
    ASSERT(m_scopeRegister.isLocal());

    ASSERT(source().provider());
    setNumParameters(unlinkedCodeBlock->numParameters());

    vm->heap.codeBlockSet().add(this);
}

// The main purpose of this function is to generate linked bytecode from unlinked bytecode. Linking
// takes an abstract representation of bytecode and ties it to a GlobalObject and scope chain.
// For example, this process allows us to cache the depth of lexical environment reads that reach
// outside of this CodeBlock's compilation unit. It also allows us to generate particular constants
// that we can't generate during unlinked bytecode generation. This process is not allowed to generate
// control flow or introduce new locals. The reason is that we rely on the liveness analysis being the
// same for all the CodeBlocks of an UnlinkedCodeBlock, and we depend on that by caching the liveness
// analysis inside the UnlinkedCodeBlock.
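//
// Illustrative sketch (hypothetical bytecode, not the exact encoding): linking turns something like
//
//     resolve_scope dst, 'x', <unresolved resolve type>
//
// into the same instruction plus metadata recording what 'x' resolved to against this CodeBlock's
// actual GlobalObject and scope chain (e.g. GlobalVar plus its watchpoint set, or ClosureVar plus a
// concrete scope depth). The instruction sequence itself is unchanged, which is what keeps the cached
// liveness analysis valid across all CodeBlocks of the same UnlinkedCodeBlock.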
bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock,
    JSScope* scope)
{
    Base::finishCreation(vm);
    finishCreationCommon(vm);

    auto throwScope = DECLARE_THROW_SCOPE(vm);

    if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes())
        vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(vm), ownerExecutable->typeProfilingEndOffset(vm));

    ScriptExecutable* topLevelExecutable = ownerExecutable->topLevelExecutable();
    setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation(), topLevelExecutable);
    RETURN_IF_EXCEPTION(throwScope, false);

    for (unsigned i = 0; i < LinkTimeConstantCount; i++) {
        LinkTimeConstant type = static_cast<LinkTimeConstant>(i);
        if (unsigned registerIndex = unlinkedCodeBlock->registerIndexForLinkTimeConstant(type))
            m_constantRegisters[registerIndex].set(vm, this, m_globalObject->jsCellForLinkTimeConstant(type));
    }

    // We already have the cloned symbol table for the module environment since we need to instantiate
    // the module environments before linking the code block. We replace the stored symbol table with the already cloned one.
    if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) {
        SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable();
        if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
            ConcurrentJSLocker locker(clonedSymbolTable->m_lock);
            clonedSymbolTable->prepareForTypeProfiling(locker);
        }
        replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable);
    }

    bool shouldUpdateFunctionHasExecutedCache = m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes();
    m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls());
    for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
        if (shouldUpdateFunctionHasExecutedCache)
            vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
        m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
    }

    m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs());
    for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
        if (shouldUpdateFunctionHasExecutedCache)
            vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset());
        m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source()));
    }

    if (unlinkedCodeBlock->hasRareData()) {
        createRareDataIfNecessary();

        setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets());
        RETURN_IF_EXCEPTION(throwScope, false);

        if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
            m_rareData->m_exceptionHandlers.resizeToFit(count);
            for (size_t i = 0; i < count; i++) {
                const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
                HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
#if ENABLE(JIT)
                auto instruction = instructions().at(unlinkedHandler.target);
                MacroAssemblerCodePtr<BytecodePtrTag> codePtr;
                if (instruction->isWide32())
                    codePtr = LLInt::getWide32CodePtr<BytecodePtrTag>(op_catch);
                else if (instruction->isWide16())
                    codePtr = LLInt::getWide16CodePtr<BytecodePtrTag>(op_catch);
                else
                    codePtr = LLInt::getCodePtr<BytecodePtrTag>(op_catch);
                handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
#else
                handler.initialize(unlinkedHandler);
#endif
            }
        }

        if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
            m_rareData->m_stringSwitchJumpTables.grow(count);
            for (size_t i = 0; i < count; i++) {
                UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
                UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
                for (; ptr != end; ++ptr) {
                    OffsetLocation offset;
                    offset.branchOffset = ptr->value.branchOffset;
                    m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
                }
            }
        }

        if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
            m_rareData->m_switchJumpTables.grow(count);
            for (size_t i = 0; i < count; i++) {
                UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
                SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
                destTable.branchOffsets = sourceTable.branchOffsets;
                destTable.min = sourceTable.min;
            }
        }
    }

    // Bookkeep the strongly referenced module environments.
    HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments;

    auto link_profile = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& /*metadata*/) {
        m_numberOfNonArgumentValueProfiles++;
    };

    auto link_objectAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
        metadata.m_objectAllocationProfile.initializeProfile(vm, m_globalObject.get(), this, m_globalObject->objectPrototype(), bytecode.m_inlineCapacity);
    };

    auto link_arrayAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) {
        metadata.m_arrayAllocationProfile.initializeIndexingMode(bytecode.m_recommendedIndexingType);
    };

#define LINK_FIELD(__field) \
    WTF_LAZY_JOIN(link_, __field)(instruction, bytecode, metadata);

#define INITIALIZE_METADATA(__op) \
    auto bytecode = instruction->as<__op>(); \
    auto& metadata = bytecode.metadata(this); \
    new (&metadata) __op::Metadata { bytecode }; \

#define CASE(__op) case __op::opcodeID

#define LINK(...) \
    CASE(WTF_LAZY_FIRST(__VA_ARGS__)): { \
        INITIALIZE_METADATA(WTF_LAZY_FIRST(__VA_ARGS__)) \
        WTF_LAZY_HAS_REST(__VA_ARGS__)({ \
            WTF_LAZY_FOR_EACH_TERM(LINK_FIELD, WTF_LAZY_REST_(__VA_ARGS__)) \
        }) \
        break; \
    }
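
// For reference, a sketch (approximate, after macro substitution) of what a LINK() entry
// in the switch below expands to:
//
//     case OpGetById::opcodeID: {
//         auto bytecode = instruction->as<OpGetById>();
//         auto& metadata = bytecode.metadata(this);
//         new (&metadata) OpGetById::Metadata { bytecode };
//         link_profile(instruction, bytecode, metadata);
//         break;
//     }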
520 | |
521 | const InstructionStream& instructionStream = instructions(); |
522 | for (const auto& instruction : instructionStream) { |
523 | OpcodeID opcodeID = instruction->opcodeID(); |
524 | m_bytecodeCost += opcodeLengths[opcodeID]; |
525 | switch (opcodeID) { |
526 | LINK(OpHasIndexedProperty) |
527 | |
528 | LINK(OpCallVarargs, profile) |
529 | LINK(OpTailCallVarargs, profile) |
530 | LINK(OpTailCallForwardArguments, profile) |
531 | LINK(OpConstructVarargs, profile) |
532 | LINK(OpGetByVal, profile) |
533 | |
534 | LINK(OpGetDirectPname, profile) |
535 | LINK(OpGetByIdWithThis, profile) |
536 | LINK(OpTryGetById, profile) |
537 | LINK(OpGetByIdDirect, profile) |
538 | LINK(OpGetByValWithThis, profile) |
539 | LINK(OpGetFromArguments, profile) |
540 | LINK(OpToNumber, profile) |
541 | LINK(OpToObject, profile) |
542 | LINK(OpGetArgument, profile) |
543 | LINK(OpToThis, profile) |
544 | LINK(OpBitand, profile) |
545 | LINK(OpBitor, profile) |
546 | LINK(OpBitnot, profile) |
547 | LINK(OpBitxor, profile) |
548 | |
549 | LINK(OpGetById, profile) |
550 | |
551 | LINK(OpCall, profile) |
552 | LINK(OpTailCall, profile) |
553 | LINK(OpCallEval, profile) |
554 | LINK(OpConstruct, profile) |
555 | |
556 | LINK(OpInByVal) |
557 | LINK(OpPutByVal) |
558 | LINK(OpPutByValDirect) |
559 | |
560 | LINK(OpNewArray) |
561 | LINK(OpNewArrayWithSize) |
562 | LINK(OpNewArrayBuffer, arrayAllocationProfile) |
563 | |
564 | LINK(OpNewObject, objectAllocationProfile) |
565 | |
566 | LINK(OpPutById) |
567 | LINK(OpCreateThis) |
568 | |
569 | LINK(OpAdd) |
570 | LINK(OpMul) |
571 | LINK(OpDiv) |
572 | LINK(OpSub) |
573 | |
574 | LINK(OpNegate) |
575 | |
576 | LINK(OpJneqPtr) |
577 | |
578 | LINK(OpCatch) |
579 | LINK(OpProfileControlFlow) |
580 | |
581 | case op_resolve_scope: { |
582 | INITIALIZE_METADATA(OpResolveScope) |
583 | |
584 | const Identifier& ident = identifier(bytecode.m_var); |
585 | RELEASE_ASSERT(bytecode.m_resolveType != LocalClosureVar); |
586 | |
587 | ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization); |
588 | RETURN_IF_EXCEPTION(throwScope, false); |
589 | |
590 | metadata.m_resolveType = op.type; |
591 | metadata.m_localScopeDepth = op.depth; |
592 | if (op.lexicalEnvironment) { |
593 | if (op.type == ModuleVar) { |
594 | // Keep the linked module environment strongly referenced. |
595 | if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry) |
596 | addConstant(op.lexicalEnvironment); |
597 | metadata.m_lexicalEnvironment.set(vm, this, op.lexicalEnvironment); |
598 | } else |
599 | metadata.m_symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable()); |
600 | } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) { |
601 | metadata.m_constantScope.set(vm, this, constantScope); |
602 | if (op.type == GlobalProperty || op.type == GlobalPropertyWithVarInjectionChecks) |
603 | metadata.m_globalLexicalBindingEpoch = m_globalObject->globalLexicalBindingEpoch(); |
604 | } else |
605 | metadata.m_globalObject = nullptr; |
606 | break; |
607 | } |
608 | |
609 | case op_get_from_scope: { |
610 | INITIALIZE_METADATA(OpGetFromScope) |
611 | |
612 | link_profile(instruction, bytecode, metadata); |
613 | metadata.m_watchpointSet = nullptr; |
614 | |
615 | ASSERT(!isInitialization(bytecode.m_getPutInfo.initializationMode())); |
616 | if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) { |
617 | metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode()); |
618 | break; |
619 | } |
620 | |
621 | const Identifier& ident = identifier(bytecode.m_var); |
622 | ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_getPutInfo.resolveType(), InitializationMode::NotInitialization); |
623 | RETURN_IF_EXCEPTION(throwScope, false); |
624 | |
625 | metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode()); |
626 | if (op.type == ModuleVar) |
627 | metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode()); |
628 | if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks) |
629 | metadata.m_watchpointSet = op.watchpointSet; |
630 | else if (op.structure) |
631 | metadata.m_structure.set(vm, this, op.structure); |
632 | metadata.m_operand = op.operand; |
633 | break; |
634 | } |
635 | |
636 | case op_put_to_scope: { |
637 | INITIALIZE_METADATA(OpPutToScope) |
638 | |
639 | if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) { |
640 | // Only do watching if the property we're putting to is not anonymous. |
641 | if (bytecode.m_var != UINT_MAX) { |
642 | SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth.symbolTable().offset())); |
643 | const Identifier& ident = identifier(bytecode.m_var); |
644 | ConcurrentJSLocker locker(symbolTable->m_lock); |
645 | auto iter = symbolTable->find(locker, ident.impl()); |
646 | ASSERT(iter != symbolTable->end(locker)); |
647 | iter->value.prepareToWatch(); |
648 | metadata.m_watchpointSet = iter->value.watchpointSet(); |
649 | } else |
650 | metadata.m_watchpointSet = nullptr; |
651 | break; |
652 | } |
653 | |
654 | const Identifier& ident = identifier(bytecode.m_var); |
655 | metadata.m_watchpointSet = nullptr; |
656 | ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.m_symbolTableOrScopeDepth.scopeDepth(), scope, ident, Put, bytecode.m_getPutInfo.resolveType(), bytecode.m_getPutInfo.initializationMode()); |
657 | RETURN_IF_EXCEPTION(throwScope, false); |
658 | |
659 | metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode()); |
660 | if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks) |
661 | metadata.m_watchpointSet = op.watchpointSet; |
662 | else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) { |
663 | if (op.watchpointSet) |
664 | op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident)); |
665 | } else if (op.structure) |
666 | metadata.m_structure.set(vm, this, op.structure); |
667 | metadata.m_operand = op.operand; |
668 | break; |
669 | } |
670 | |
671 | case op_profile_type: { |
672 | RELEASE_ASSERT(m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()); |
673 | |
674 | INITIALIZE_METADATA(OpProfileType) |
675 | |
676 | size_t instructionOffset = instruction.offset() + instruction->size() - 1; |
677 | unsigned divotStart, divotEnd; |
678 | GlobalVariableID globalVariableID = 0; |
679 | RefPtr<TypeSet> globalTypeSet; |
680 | bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd); |
681 | SymbolTable* symbolTable = nullptr; |
682 | |
683 | switch (bytecode.m_flag) { |
684 | case ProfileTypeBytecodeClosureVar: { |
685 | const Identifier& ident = identifier(bytecode.m_identifier); |
686 | unsigned localScopeDepth = bytecode.m_symbolTableOrScopeDepth.scopeDepth(); |
687 | // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because |
688 | // we're abstractly "read"ing from a JSScope. |
689 | ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization); |
690 | RETURN_IF_EXCEPTION(throwScope, false); |
691 | |
692 | if (op.type == ClosureVar || op.type == ModuleVar) |
693 | symbolTable = op.lexicalEnvironment->symbolTable(); |
694 | else if (op.type == GlobalVar) |
695 | symbolTable = m_globalObject.get()->symbolTable(); |
696 | |
697 | UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl(); |
698 | if (symbolTable) { |
699 | ConcurrentJSLocker locker(symbolTable->m_lock); |
700 | // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet. |
701 | symbolTable->prepareForTypeProfiling(locker); |
702 | globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm); |
703 | globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm); |
704 | } else |
705 | globalVariableID = TypeProfilerNoGlobalIDExists; |
706 | |
707 | break; |
708 | } |
709 | case ProfileTypeBytecodeLocallyResolved: { |
710 | int symbolTableIndex = bytecode.m_symbolTableOrScopeDepth.symbolTable().offset(); |
711 | SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex)); |
712 | const Identifier& ident = identifier(bytecode.m_identifier); |
713 | ConcurrentJSLocker locker(symbolTable->m_lock); |
714 | // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet. |
715 | globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm); |
716 | globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm); |
717 | |
718 | break; |
719 | } |
720 | case ProfileTypeBytecodeDoesNotHaveGlobalID: |
721 | case ProfileTypeBytecodeFunctionArgument: { |
722 | globalVariableID = TypeProfilerNoGlobalIDExists; |
723 | break; |
724 | } |
725 | case ProfileTypeBytecodeFunctionReturnStatement: { |
726 | RELEASE_ASSERT(ownerExecutable->isFunctionExecutable()); |
727 | globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet(); |
728 | globalVariableID = TypeProfilerReturnStatement; |
729 | if (!shouldAnalyze) { |
730 | // Because a return statement can be added implicitly to return undefined at the end of a function, |
731 | // and these nodes don't emit expression ranges because they aren't in the actual source text of |
732 | // the user's program, give the type profiler some range to identify these return statements. |
733 | // Currently, the text offset that is used as identification is "f" in the function keyword |
734 | // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable. |
735 | divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(vm); |
736 | shouldAnalyze = true; |
737 | } |
738 | break; |
739 | } |
740 | } |
741 | |
742 | std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID, |
743 | ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm); |
744 | TypeLocation* location = locationPair.first; |
745 | bool isNewLocation = locationPair.second; |
746 | |
747 | if (bytecode.m_flag == ProfileTypeBytecodeFunctionReturnStatement) |
748 | location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(vm); |
749 | |
750 | if (shouldAnalyze && isNewLocation) |
751 | vm.typeProfiler()->insertNewLocation(location); |
752 | |
753 | metadata.m_typeLocation = location; |
754 | break; |
755 | } |
756 | |
757 | case op_debug: { |
758 | if (instruction->as<OpDebug>().m_debugHookType == DidReachBreakpoint) |
759 | m_hasDebuggerStatement = true; |
760 | break; |
761 | } |
762 | |
763 | case op_create_rest: { |
764 | int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().m_numParametersToSkip; |
765 | ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0); |
766 | // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT."); |
767 | m_numberOfArgumentsToSkip = numberOfArgumentsToSkip; |
768 | break; |
769 | } |
770 | |
771 | default: |
772 | break; |
773 | } |
774 | } |
775 | |
776 | #undef CASE |
777 | #undef INITIALIZE_METADATA |
778 | #undef LINK_FIELD |
779 | #undef LINK |
780 | |
781 | if (m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes()) |
782 | insertBasicBlockBoundariesForControlFlowProfiler(); |
783 | |
784 | // Set optimization thresholds only after instructions is initialized, since these |
785 | // rely on the instruction count (and are in theory permitted to also inspect the |
786 | // instruction stream to more accurate assess the cost of tier-up). |
787 | optimizeAfterWarmUp(); |
788 | jitAfterWarmUp(); |
789 | |
790 | // If the concurrent thread will want the code block's hash, then compute it here |
791 | // synchronously. |
792 | if (Options::alwaysComputeHash()) |
793 | hash(); |
794 | |
795 | if (Options::dumpGeneratedBytecodes()) |
796 | dumpBytecode(); |
797 | |
798 | if (m_metadata) |
799 | vm.heap.reportExtraMemoryAllocated(m_metadata->sizeInBytes()); |
800 | |
801 | return true; |
802 | } |

void CodeBlock::finishCreationCommon(VM& vm)
{
    m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this));
}

CodeBlock::~CodeBlock()
{
    VM& vm = *m_vm;

    vm.heap.codeBlockSet().remove(this);

    if (UNLIKELY(vm.m_perBytecodeProfiler))
        vm.m_perBytecodeProfiler->notifyDestruction(this);

    if (!vm.heap.isShuttingDown() && unlinkedCodeBlock()->didOptimize() == MixedTriState)
        unlinkedCodeBlock()->setDidOptimize(FalseTriState);

#if ENABLE(VERBOSE_VALUE_PROFILE)
    dumpValueProfiles();
#endif

    // We may be destroyed before any CodeBlocks that refer to us are destroyed.
    // Consider that two CodeBlocks become unreachable at the same time. There
    // is no guarantee about the order in which the CodeBlocks are destroyed.
    // So, if we don't remove incoming calls, and get destroyed before the
    // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
    // destructor will try to remove nodes from our (no longer valid) linked list.
    unlinkIncomingCalls();

    // Note that our outgoing calls will be removed from other CodeBlocks'
    // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
    // destructors.
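
    // A concrete (hypothetical) instance of the hazard described above: caller C's
    // CallLinkInfo is threaded onto callee E's incoming-call list. If E is destroyed
    // first without running unlinkIncomingCalls(), then ~CallLinkInfo (run when C
    // dies) would try to remove itself from E's freed list through a dangling
    // pointer.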

#if ENABLE(JIT)
    if (auto* jitData = m_jitData.get()) {
        for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
            stubInfo->aboutToDie();
            stubInfo->deref();
        }
    }
#endif // ENABLE(JIT)
}

void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIdentifierSetEntry>& constants)
{
    auto scope = DECLARE_THROW_SCOPE(vm);
    JSGlobalObject* globalObject = m_globalObject.get();
    ExecState* exec = globalObject->globalExec();

    for (const auto& entry : constants) {
        const IdentifierSet& set = entry.first;

        Structure* setStructure = globalObject->setStructure();
        RETURN_IF_EXCEPTION(scope, void());
        JSSet* jsSet = JSSet::create(exec, vm, setStructure, set.size());
        RETURN_IF_EXCEPTION(scope, void());

        for (auto setEntry : set) {
            JSString* jsString = jsOwnedString(&vm, setEntry.get());
            jsSet->add(exec, jsString);
            RETURN_IF_EXCEPTION(scope, void());
        }
        m_constantRegisters[entry.second].set(vm, this, jsSet);
    }
}

void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation, ScriptExecutable* topLevelExecutable)
{
    VM& vm = *m_vm;
    auto scope = DECLARE_THROW_SCOPE(vm);
    JSGlobalObject* globalObject = m_globalObject.get();
    ExecState* exec = globalObject->globalExec();

    ASSERT(constants.size() == constantsSourceCodeRepresentation.size());
    size_t count = constants.size();
    m_constantRegisters.resizeToFit(count);
    for (size_t i = 0; i < count; i++) {
        JSValue constant = constants[i].get();

        if (!constant.isEmpty()) {
            if (constant.isCell()) {
                JSCell* cell = constant.asCell();
                if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) {
                    if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) {
                        ConcurrentJSLocker locker(symbolTable->m_lock);
                        symbolTable->prepareForTypeProfiling(locker);
                    }

                    SymbolTable* clone = symbolTable->cloneScopePart(vm);
                    if (wasCompiledWithDebuggingOpcodes())
                        clone->setRareDataCodeBlock(this);

                    constant = clone;
                } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) {
                    auto* templateObject = topLevelExecutable->createTemplateObject(exec, descriptor);
                    RETURN_IF_EXCEPTION(scope, void());
                    constant = templateObject;
                }
            }
        }

        m_constantRegisters[i].set(vm, this, constant);
    }

    m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation;
}

void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative)
{
    RELEASE_ASSERT(alternative);
    RELEASE_ASSERT(alternative->jitCode());
    m_alternative.set(vm, this, alternative);
}

void CodeBlock::setNumParameters(int newValue)
{
    m_numParameters = newValue;

    m_argumentValueProfiles = RefCountedArray<ValueProfile>(vm()->canUseJIT() ? newValue : 0);
}

CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
{
#if ENABLE(FTL_JIT)
    if (jitType() != JITType::DFGJIT)
        return 0;
    DFG::JITCode* jitCode = m_jitCode->dfg();
    return jitCode->osrEntryBlock();
#else // ENABLE(FTL_JIT)
    return 0;
#endif // ENABLE(FTL_JIT)
}

size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm)
{
    CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
    size_t extraMemoryAllocated = 0;
    if (thisObject->m_metadata)
        extraMemoryAllocated += thisObject->m_metadata->sizeInBytes();
    RefPtr<JITCode> jitCode = thisObject->m_jitCode;
    if (jitCode && !jitCode->isShared())
        extraMemoryAllocated += jitCode->size();
    return Base::estimatedSize(cell, vm) + extraMemoryAllocated;
}

void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor)
{
    CodeBlock* thisObject = jsCast<CodeBlock*>(cell);
    ASSERT_GC_OBJECT_INHERITS(thisObject, info());
    Base::visitChildren(cell, visitor);
    visitor.append(thisObject->m_ownerEdge);
    thisObject->visitChildren(visitor);
}

void CodeBlock::visitChildren(SlotVisitor& visitor)
{
    ConcurrentJSLocker locker(m_lock);
    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
        visitor.appendUnbarriered(otherBlock);

    size_t extraMemory = 0;
    if (m_metadata)
        extraMemory += m_metadata->sizeInBytes();
    if (m_jitCode && !m_jitCode->isShared())
        extraMemory += m_jitCode->size();
    visitor.reportExtraMemoryVisited(extraMemory);

    stronglyVisitStrongReferences(locker, visitor);
    stronglyVisitWeakReferences(locker, visitor);

    VM::SpaceAndSet::setFor(*subspace()).add(this);
}

bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker)
{
    if (Options::forceCodeBlockLiveness())
        return true;

    if (shouldJettisonDueToOldAge(locker))
        return false;

    // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
    // their weak references go stale. So if a baseline JIT CodeBlock gets
    // scanned, we can assume that this means that it's live.
    if (!JITCode::isOptimizingJIT(jitType()))
        return true;

    return false;
}

bool CodeBlock::shouldJettisonDueToWeakReference(VM& vm)
{
    if (!JITCode::isOptimizingJIT(jitType()))
        return false;
    return !vm.heap.isMarked(this);
}

static Seconds timeToLive(JITType jitType)
{
    if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
        switch (jitType) {
        case JITType::InterpreterThunk:
            return 10_ms;
        case JITType::BaselineJIT:
            return 30_ms;
        case JITType::DFGJIT:
            return 40_ms;
        case JITType::FTLJIT:
            return 120_ms;
        default:
            return Seconds::infinity();
        }
    }

    switch (jitType) {
    case JITType::InterpreterThunk:
        return 5_s;
    case JITType::BaselineJIT:
        // Effectively 10 additional seconds, since BaselineJIT and
        // InterpreterThunk share a CodeBlock.
        return 15_s;
    case JITType::DFGJIT:
        return 20_s;
    case JITType::FTLJIT:
        return 60_s;
    default:
        return Seconds::infinity();
    }
}

bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&)
{
    if (m_vm->heap.isMarked(this))
        return false;

    if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge()))
        return true;

    if (timeSinceCreation() < timeToLive(jitType()))
        return false;

    return true;
}

#if ENABLE(DFG_JIT)
static bool shouldMarkTransition(VM& vm, DFG::WeakReferenceTransition& transition)
{
    if (transition.m_codeOrigin && !vm.heap.isMarked(transition.m_codeOrigin.get()))
        return false;

    if (!vm.heap.isMarked(transition.m_from.get()))
        return false;

    return true;
}
#endif // ENABLE(DFG_JIT)

void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);

    VM& vm = *m_vm;

    if (jitType() == JITType::InterpreterThunk) {
        const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
        const InstructionStream& instructionStream = instructions();
        for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
            auto instruction = instructionStream.at(propertyAccessInstructions[i]);
            if (instruction->is<OpPutById>()) {
                auto& metadata = instruction->as<OpPutById>().metadata(this);
                StructureID oldStructureID = metadata.m_oldStructureID;
                StructureID newStructureID = metadata.m_newStructureID;
                if (!oldStructureID || !newStructureID)
                    continue;
                Structure* oldStructure =
                    vm.heap.structureIDTable().get(oldStructureID);
                Structure* newStructure =
                    vm.heap.structureIDTable().get(newStructureID);
                if (vm.heap.isMarked(oldStructure))
                    visitor.appendUnbarriered(newStructure);
                continue;
            }
        }
    }

#if ENABLE(JIT)
    if (JITCode::isJIT(jitType())) {
        if (auto* jitData = m_jitData.get()) {
            for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
                stubInfo->propagateTransitions(visitor);
        }
    }
#endif // ENABLE(JIT)

#if ENABLE(DFG_JIT)
    if (JITCode::isOptimizingJIT(jitType())) {
        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();

        dfgCommon->recordedStatuses.markIfCheap(visitor);

        for (auto& weakReference : dfgCommon->weakStructureReferences)
            weakReference->markIfCheap(visitor);

        for (auto& transition : dfgCommon->transitions) {
            if (shouldMarkTransition(vm, transition)) {
                // If the following three things are live, then the target of the
                // transition is also live:
                //
                // - This code block. We know it's live already because otherwise
                //   we wouldn't be scanning ourselves.
                //
                // - The code origin of the transition. Transitions may arise from
                //   code that was inlined. They are not relevant if the user's
                //   object that is required for the inlinee to run is no longer
                //   live.
                //
                // - The source of the transition. The transition checks if some
                //   heap location holds the source, and if so, stores the target.
                //   Hence the source must be live for the transition to be live.
                //
                // We also short-circuit the liveness if the structure is harmless
                // to mark (i.e. its global object and prototype are both already
                // live).
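                //
                // Concrete (hypothetical) example: a cached put transition from
                // structure S1 to S2 for `o.x = ...`. S2 is only worth keeping
                // alive if this block, the inlined code origin that recorded the
                // transition (if any), and S1 are all still live; otherwise the
                // transition can never fire again and S2 may safely die.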

                visitor.append(transition.m_to);
            }
        }
    }
#endif // ENABLE(DFG_JIT)
}

void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);

#if ENABLE(DFG_JIT)
    VM& vm = *m_vm;
    if (vm.heap.isMarked(this))
        return;

    // In rare and weird cases, this could be called on a baseline CodeBlock. One that I found was
    // that we might decide that the CodeBlock should be jettisoned due to old age, so the
    // isMarked check doesn't protect us.
    if (!JITCode::isOptimizingJIT(jitType()))
        return;

    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
    // Now check all of our weak references. If all of them are live, then we
    // have proved liveness and so we scan our strong references. If at end of
    // GC we still have not proved liveness, then this code block is toast.
    bool allAreLiveSoFar = true;
    for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
        JSCell* reference = dfgCommon->weakReferences[i].get();
        ASSERT(!jsDynamicCast<CodeBlock*>(vm, reference));
        if (!vm.heap.isMarked(reference)) {
            allAreLiveSoFar = false;
            break;
        }
    }
    if (allAreLiveSoFar) {
        for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) {
            if (!vm.heap.isMarked(dfgCommon->weakStructureReferences[i].get())) {
                allAreLiveSoFar = false;
                break;
            }
        }
    }

    // If some weak references are dead, then this fixpoint iteration was
    // unsuccessful.
    if (!allAreLiveSoFar)
        return;

    // All weak references are live. Record this information so we don't
    // come back here again, and scan the strong references.
    visitor.appendUnbarriered(this);
#endif // ENABLE(DFG_JIT)
}

void CodeBlock::finalizeLLIntInlineCaches()
{
    VM& vm = *m_vm;
    const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();

    auto handleGetPutFromScope = [&] (auto& metadata) {
        GetPutInfo getPutInfo = metadata.m_getPutInfo;
        if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks
            || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks)
            return;
        WriteBarrierBase<Structure>& structure = metadata.m_structure;
        if (!structure || vm.heap.isMarked(structure.get()))
            return;
        if (Options::verboseOSR())
            dataLogF("Clearing scope access with structure %p.\n", structure.get());
        structure.clear();
    };

    const InstructionStream& instructionStream = instructions();
    for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
        const auto curInstruction = instructionStream.at(propertyAccessInstructions[i]);
        switch (curInstruction->opcodeID()) {
        case op_get_by_id: {
            auto& metadata = curInstruction->as<OpGetById>().metadata(this);
            if (metadata.m_modeMetadata.mode != GetByIdMode::Default)
                break;
            StructureID oldStructureID = metadata.m_modeMetadata.defaultMode.structureID;
            if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
                break;
            if (Options::verboseOSR())
                dataLogF("Clearing LLInt property access.\n");
            LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata);
            break;
        }
        case op_get_by_id_direct: {
            auto& metadata = curInstruction->as<OpGetByIdDirect>().metadata(this);
            StructureID oldStructureID = metadata.m_structureID;
            if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
                break;
            if (Options::verboseOSR())
                dataLogF("Clearing LLInt property access.\n");
            metadata.m_structureID = 0;
            metadata.m_offset = 0;
            break;
        }
        case op_put_by_id: {
            auto& metadata = curInstruction->as<OpPutById>().metadata(this);
            StructureID oldStructureID = metadata.m_oldStructureID;
            StructureID newStructureID = metadata.m_newStructureID;
            StructureChain* chain = metadata.m_structureChain.get();
            if ((!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID)))
                && (!newStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(newStructureID)))
                && (!chain || vm.heap.isMarked(chain)))
                break;
            if (Options::verboseOSR())
                dataLogF("Clearing LLInt put transition.\n");
            metadata.m_oldStructureID = 0;
            metadata.m_offset = 0;
            metadata.m_newStructureID = 0;
            metadata.m_structureChain.clear();
            break;
        }
        // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418
        // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution.
        case op_resolve_scope_for_hoisting_func_decl_in_eval:
            break;
        case op_to_this: {
            auto& metadata = curInstruction->as<OpToThis>().metadata(this);
            if (!metadata.m_cachedStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(metadata.m_cachedStructureID)))
                break;
            if (Options::verboseOSR()) {
                Structure* structure = vm.heap.structureIDTable().get(metadata.m_cachedStructureID);
                dataLogF("Clearing LLInt to_this with structure %p.\n", structure);
            }
            metadata.m_cachedStructureID = 0;
            metadata.m_toThisStatus = merge(metadata.m_toThisStatus, ToThisClearedByGC);
            break;
        }
        case op_create_this: {
            auto& metadata = curInstruction->as<OpCreateThis>().metadata(this);
            auto& cacheWriteBarrier = metadata.m_cachedCallee;
            if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects())
                break;
            JSCell* cachedFunction = cacheWriteBarrier.get();
            if (vm.heap.isMarked(cachedFunction))
                break;
            if (Options::verboseOSR())
                dataLogF("Clearing LLInt create_this with cached callee %p.\n", cachedFunction);
            cacheWriteBarrier.clear();
            break;
        }
        case op_resolve_scope: {
            // Right now this isn't strictly necessary. Any symbol tables that this will refer to
            // are for outer functions, and we refer to those functions strongly, and they refer
            // to the symbol table strongly. But it's nice to be on the safe side.
            auto& metadata = curInstruction->as<OpResolveScope>().metadata(this);
            WriteBarrierBase<SymbolTable>& symbolTable = metadata.m_symbolTable;
            if (!symbolTable || vm.heap.isMarked(symbolTable.get()))
                break;
            if (Options::verboseOSR())
                dataLogF("Clearing dead symbolTable %p.\n", symbolTable.get());
            symbolTable.clear();
            break;
        }
        case op_get_from_scope:
            handleGetPutFromScope(curInstruction->as<OpGetFromScope>().metadata(this));
            break;
        case op_put_to_scope:
            handleGetPutFromScope(curInstruction->as<OpPutToScope>().metadata(this));
            break;
        default:
            OpcodeID opcodeID = curInstruction->opcodeID();
            ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]);
        }
    }

    // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set
    // then cleared the cache without GCing in between.
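    //
    // Illustrative (hypothetical) scenario: a get_by_id installs a prototype-load
    // watchpoint set keyed by (Structure, bytecode offset), and the cache above is
    // then cleared before any GC runs. The map entry must survive until this sweep
    // decides, based on the liveness of the structure and of the watchpoint keys,
    // whether it can go.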
1301 | m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool { |
1302 | auto clear = [&] () { |
1303 | auto& instruction = instructions().at(std::get<1>(pair.key)); |
1304 | OpcodeID opcode = instruction->opcodeID(); |
1305 | if (opcode == op_get_by_id) { |
1306 | if (Options::verboseOSR()) |
1307 | dataLogF("Clearing LLInt property access.\n" ); |
1308 | LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this)); |
1309 | } |
1310 | return true; |
1311 | }; |
1312 | |
1313 | if (!vm.heap.isMarked(vm.heap.structureIDTable().get(std::get<0>(pair.key)))) |
1314 | return clear(); |
1315 | |
1316 | for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint& watchpoint : pair.value) { |
1317 | if (!watchpoint.key().isStillLive(vm)) |
1318 | return clear(); |
1319 | } |
1320 | |
1321 | return false; |
1322 | }); |
1323 | |
1324 | forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) { |
1325 | if (callLinkInfo.isLinked() && !vm.heap.isMarked(callLinkInfo.callee())) { |
1326 | if (Options::verboseOSR()) |
1327 | dataLog("Clearing LLInt call from " , *this, "\n" ); |
1328 | callLinkInfo.unlink(); |
1329 | } |
1330 | if (callLinkInfo.lastSeenCallee() && !vm.heap.isMarked(callLinkInfo.lastSeenCallee())) |
1331 | callLinkInfo.clearLastSeenCallee(); |
1332 | }); |
1333 | } |
1334 | |
1335 | #if ENABLE(JIT) |
1336 | CodeBlock::JITData& CodeBlock::ensureJITDataSlow(const ConcurrentJSLocker&) |
1337 | { |
1338 | ASSERT(!m_jitData); |
1339 | m_jitData = std::make_unique<JITData>(); |
1340 | return *m_jitData; |
1341 | } |
1342 | |
1343 | void CodeBlock::finalizeBaselineJITInlineCaches() |
1344 | { |
1345 | if (auto* jitData = m_jitData.get()) { |
1346 | for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) |
1347 | callLinkInfo->visitWeak(*vm()); |
1348 | |
1349 | for (StructureStubInfo* stubInfo : jitData->m_stubInfos) |
1350 | stubInfo->visitWeakReferences(this); |
1351 | } |
1352 | } |
1353 | #endif |
1354 | |
1355 | void CodeBlock::finalizeUnconditionally(VM& vm) |
1356 | { |
1357 | UNUSED_PARAM(vm); |
1358 | |
1359 | updateAllPredictions(); |
1360 | |
1361 | if (JITCode::couldBeInterpreted(jitType())) |
1362 | finalizeLLIntInlineCaches(); |
1363 | |
1364 | #if ENABLE(JIT) |
1365 | if (!!jitCode()) |
1366 | finalizeBaselineJITInlineCaches(); |
1367 | #endif |
1368 | |
1369 | #if ENABLE(DFG_JIT) |
1370 | if (JITCode::isOptimizingJIT(jitType())) { |
1371 | DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); |
1372 | dfgCommon->recordedStatuses.finalize(vm); |
1373 | } |
1374 | #endif // ENABLE(DFG_JIT) |
1375 | |
1376 | auto updateActivity = [&] { |
1377 | if (!VM::useUnlinkedCodeBlockJettisoning()) |
1378 | return; |
1379 | JITCode* jitCode = m_jitCode.get(); |
1380 | double count = 0; |
1381 | bool alwaysActive = false; |
1382 | switch (JITCode::jitTypeFor(jitCode)) { |
1383 | case JITType::None: |
1384 | case JITType::HostCallThunk: |
1385 | return; |
1386 | case JITType::InterpreterThunk: |
1387 | count = m_llintExecuteCounter.count(); |
1388 | break; |
1389 | case JITType::BaselineJIT: |
1390 | count = m_jitExecuteCounter.count(); |
1391 | break; |
1392 | case JITType::DFGJIT: |
1393 | #if ENABLE(FTL_JIT) |
1394 | count = static_cast<DFG::JITCode*>(jitCode)->tierUpCounter.count(); |
1395 | #else |
1396 | alwaysActive = true; |
1397 | #endif |
1398 | break; |
1399 | case JITType::FTLJIT: |
1400 | alwaysActive = true; |
1401 | break; |
1402 | } |
1403 | if (alwaysActive || m_previousCounter < count) { |
// The CodeBlock is active right now, so reset the UnlinkedCodeBlock's age.
1405 | m_unlinkedCode->resetAge(); |
1406 | } |
1407 | m_previousCounter = count; |
1408 | }; |
1409 | updateActivity(); |
1410 | |
1411 | VM::SpaceAndSet::setFor(*subspace()).remove(this); |
1412 | } |
1413 | |
1414 | void CodeBlock::destroy(JSCell* cell) |
1415 | { |
1416 | static_cast<CodeBlock*>(cell)->~CodeBlock(); |
1417 | } |
1418 | |
1419 | void CodeBlock::getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result) |
1420 | { |
1421 | #if ENABLE(JIT) |
1422 | if (JITCode::isJIT(jitType())) { |
1423 | if (auto* jitData = m_jitData.get()) { |
1424 | for (StructureStubInfo* stubInfo : jitData->m_stubInfos) |
1425 | result.add(stubInfo->codeOrigin, ICStatus()).iterator->value.stubInfo = stubInfo; |
1426 | for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) |
1427 | result.add(callLinkInfo->codeOrigin(), ICStatus()).iterator->value.callLinkInfo = callLinkInfo; |
1428 | for (ByValInfo* byValInfo : jitData->m_byValInfos) |
1429 | result.add(CodeOrigin(byValInfo->bytecodeIndex), ICStatus()).iterator->value.byValInfo = byValInfo; |
1430 | } |
1431 | #if ENABLE(DFG_JIT) |
1432 | if (JITCode::isOptimizingJIT(jitType())) { |
1433 | DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); |
1434 | for (auto& pair : dfgCommon->recordedStatuses.calls) |
1435 | result.add(pair.first, ICStatus()).iterator->value.callStatus = pair.second.get(); |
1436 | for (auto& pair : dfgCommon->recordedStatuses.gets) |
1437 | result.add(pair.first, ICStatus()).iterator->value.getStatus = pair.second.get(); |
1438 | for (auto& pair : dfgCommon->recordedStatuses.puts) |
1439 | result.add(pair.first, ICStatus()).iterator->value.putStatus = pair.second.get(); |
1440 | for (auto& pair : dfgCommon->recordedStatuses.ins) |
1441 | result.add(pair.first, ICStatus()).iterator->value.inStatus = pair.second.get(); |
1442 | } |
1443 | #endif |
1444 | } |
1445 | #else |
1446 | UNUSED_PARAM(result); |
1447 | #endif |
1448 | } |
1449 | |
1450 | void CodeBlock::getICStatusMap(ICStatusMap& result) |
1451 | { |
1452 | ConcurrentJSLocker locker(m_lock); |
1453 | getICStatusMap(locker, result); |
1454 | } |
1455 | |
1456 | #if ENABLE(JIT) |
1457 | StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType) |
1458 | { |
1459 | ConcurrentJSLocker locker(m_lock); |
1460 | return ensureJITData(locker).m_stubInfos.add(accessType); |
1461 | } |
1462 | |
1463 | JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile) |
1464 | { |
1465 | ConcurrentJSLocker locker(m_lock); |
1466 | return ensureJITData(locker).m_addICs.add(arithProfile); |
1467 | } |
1468 | |
1469 | JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile) |
1470 | { |
1471 | ConcurrentJSLocker locker(m_lock); |
1472 | return ensureJITData(locker).m_mulICs.add(arithProfile); |
1473 | } |
1474 | |
1475 | JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile) |
1476 | { |
1477 | ConcurrentJSLocker locker(m_lock); |
1478 | return ensureJITData(locker).m_subICs.add(arithProfile); |
1479 | } |
1480 | |
1481 | JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile) |
1482 | { |
1483 | ConcurrentJSLocker locker(m_lock); |
1484 | return ensureJITData(locker).m_negICs.add(arithProfile); |
1485 | } |
1486 | |
1487 | StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin) |
1488 | { |
1489 | ConcurrentJSLocker locker(m_lock); |
1490 | if (auto* jitData = m_jitData.get()) { |
1491 | for (StructureStubInfo* stubInfo : jitData->m_stubInfos) { |
1492 | if (stubInfo->codeOrigin == codeOrigin) |
1493 | return stubInfo; |
1494 | } |
1495 | } |
1496 | return nullptr; |
1497 | } |
1498 | |
1499 | ByValInfo* CodeBlock::addByValInfo() |
1500 | { |
1501 | ConcurrentJSLocker locker(m_lock); |
1502 | return ensureJITData(locker).m_byValInfos.add(); |
1503 | } |
1504 | |
1505 | CallLinkInfo* CodeBlock::addCallLinkInfo() |
1506 | { |
1507 | ConcurrentJSLocker locker(m_lock); |
1508 | return ensureJITData(locker).m_callLinkInfos.add(); |
1509 | } |
1510 | |
1511 | CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index) |
1512 | { |
1513 | ConcurrentJSLocker locker(m_lock); |
1514 | if (auto* jitData = m_jitData.get()) { |
1515 | for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) { |
1516 | if (callLinkInfo->codeOrigin() == CodeOrigin(index)) |
1517 | return callLinkInfo; |
1518 | } |
1519 | } |
1520 | return nullptr; |
1521 | } |
1522 | |
1523 | RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset) |
1524 | { |
1525 | ConcurrentJSLocker locker(m_lock); |
1526 | auto& jitData = ensureJITData(locker); |
1527 | jitData.m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset)); |
1528 | return &jitData.m_rareCaseProfiles.last(); |
1529 | } |
1530 | |
1531 | RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset) |
1532 | { |
1533 | if (auto* jitData = m_jitData.get()) { |
1534 | return tryBinarySearch<RareCaseProfile, int>( |
1535 | jitData->m_rareCaseProfiles, jitData->m_rareCaseProfiles.size(), bytecodeOffset, |
1536 | getRareCaseProfileBytecodeOffset); |
1537 | } |
1538 | return nullptr; |
1539 | } |
1540 | |
1541 | unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset) |
1542 | { |
1543 | RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(locker, bytecodeOffset); |
1544 | if (profile) |
1545 | return profile->m_counter; |
1546 | return 0; |
1547 | } |
1548 | |
1549 | void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters) |
1550 | { |
1551 | ConcurrentJSLocker locker(m_lock); |
1552 | ensureJITData(locker).m_calleeSaveRegisters = std::make_unique<RegisterAtOffsetList>(calleeSaveRegisters); |
1553 | } |
1554 | |
1555 | void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList) |
1556 | { |
1557 | ConcurrentJSLocker locker(m_lock); |
1558 | ensureJITData(locker).m_calleeSaveRegisters = WTFMove(registerAtOffsetList); |
1559 | } |
1560 | |
1561 | void CodeBlock::resetJITData() |
1562 | { |
1563 | RELEASE_ASSERT(!JITCode::isJIT(jitType())); |
1564 | ConcurrentJSLocker locker(m_lock); |
1565 | |
1566 | if (auto* jitData = m_jitData.get()) { |
1567 | // We can clear these because no other thread will have references to any stub infos, call |
1568 | // link infos, or by val infos if we don't have JIT code. Attempts to query these data |
1569 | // structures using the concurrent API (getICStatusMap and friends) will return nothing if we |
1570 | // don't have JIT code. |
1571 | jitData->m_stubInfos.clear(); |
1572 | jitData->m_callLinkInfos.clear(); |
1573 | jitData->m_byValInfos.clear(); |
1574 | // We can clear this because the DFG's queries to these data structures are guarded by whether |
1575 | // there is JIT code. |
1576 | jitData->m_rareCaseProfiles.clear(); |
1577 | } |
1578 | } |
1579 | #endif |
1580 | |
1581 | void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor) |
1582 | { |
1583 | // We strongly visit OSR exits targets because we don't want to deal with |
1584 | // the complexity of generating an exit target CodeBlock on demand and |
1585 | // guaranteeing that it matches the details of the CodeBlock we compiled |
1586 | // the OSR exit against. |
1587 | |
1588 | visitor.append(m_alternative); |
1589 | |
1590 | #if ENABLE(DFG_JIT) |
1591 | DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); |
1592 | if (dfgCommon->inlineCallFrames) { |
1593 | for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) { |
1594 | ASSERT(inlineCallFrame->baselineCodeBlock); |
1595 | visitor.append(inlineCallFrame->baselineCodeBlock); |
1596 | } |
1597 | } |
1598 | #endif |
1599 | } |
1600 | |
1601 | void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor) |
1602 | { |
1603 | UNUSED_PARAM(locker); |
1604 | |
1605 | visitor.append(m_globalObject); |
1606 | visitor.append(m_ownerExecutable); // This is extra important since it causes the ExecutableToCodeBlockEdge to be marked. |
1607 | visitor.append(m_unlinkedCode); |
1608 | if (m_rareData) |
1609 | m_rareData->m_directEvalCodeCache.visitAggregate(visitor); |
1610 | visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size()); |
1611 | for (auto& functionExpr : m_functionExprs) |
1612 | visitor.append(functionExpr); |
1613 | for (auto& functionDecl : m_functionDecls) |
1614 | visitor.append(functionDecl); |
1615 | forEachObjectAllocationProfile([&](ObjectAllocationProfile& objectAllocationProfile) { |
1616 | objectAllocationProfile.visitAggregate(visitor); |
1617 | }); |
1618 | |
1619 | #if ENABLE(JIT) |
1620 | if (auto* jitData = m_jitData.get()) { |
1621 | for (ByValInfo* byValInfo : jitData->m_byValInfos) |
1622 | visitor.append(byValInfo->cachedSymbol); |
1623 | } |
1624 | #endif |
1625 | |
1626 | #if ENABLE(DFG_JIT) |
1627 | if (JITCode::isOptimizingJIT(jitType())) |
1628 | visitOSRExitTargets(locker, visitor); |
1629 | #endif |
1630 | } |
1631 | |
1632 | void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor) |
1633 | { |
1634 | UNUSED_PARAM(visitor); |
1635 | |
1636 | #if ENABLE(DFG_JIT) |
1637 | if (!JITCode::isOptimizingJIT(jitType())) |
1638 | return; |
1639 | |
1640 | DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); |
1641 | |
1642 | for (auto& transition : dfgCommon->transitions) { |
1643 | if (!!transition.m_codeOrigin) |
1644 | visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though. |
1645 | visitor.append(transition.m_from); |
1646 | visitor.append(transition.m_to); |
1647 | } |
1648 | |
1649 | for (auto& weakReference : dfgCommon->weakReferences) |
1650 | visitor.append(weakReference); |
1651 | |
1652 | for (auto& weakStructureReference : dfgCommon->weakStructureReferences) |
1653 | visitor.append(weakStructureReference); |
1654 | |
1655 | dfgCommon->livenessHasBeenProved = true; |
1656 | #endif |
1657 | } |
1658 | |
1659 | CodeBlock* CodeBlock::baselineAlternative() |
1660 | { |
1661 | #if ENABLE(JIT) |
1662 | CodeBlock* result = this; |
1663 | while (result->alternative()) |
1664 | result = result->alternative(); |
1665 | RELEASE_ASSERT(result); |
1666 | RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITType::None); |
1667 | return result; |
1668 | #else |
1669 | return this; |
1670 | #endif |
1671 | } |
1672 | |
1673 | CodeBlock* CodeBlock::baselineVersion() |
1674 | { |
1675 | #if ENABLE(JIT) |
1676 | JITType selfJITType = jitType(); |
1677 | if (JITCode::isBaselineCode(selfJITType)) |
1678 | return this; |
1679 | CodeBlock* result = replacement(); |
1680 | if (!result) { |
1681 | if (JITCode::isOptimizingJIT(selfJITType)) { |
1682 | // The replacement can be null if we've had a memory clean up and the executable |
1683 | // has been purged of its codeBlocks (see ExecutableBase::clearCode()). Regardless, |
1684 | // the current codeBlock is still live on the stack, and as an optimizing JIT |
1685 | // codeBlock, it will keep its baselineAlternative() alive for us to fetch below. |
1686 | result = this; |
1687 | } else { |
1688 | // This can happen if we're creating the original CodeBlock for an executable. |
1689 | // Assume that we're the baseline CodeBlock. |
1690 | RELEASE_ASSERT(selfJITType == JITType::None); |
1691 | return this; |
1692 | } |
1693 | } |
1694 | result = result->baselineAlternative(); |
1695 | ASSERT(result); |
1696 | return result; |
1697 | #else |
1698 | return this; |
1699 | #endif |
1700 | } |
1701 | |
1702 | #if ENABLE(JIT) |
1703 | bool CodeBlock::hasOptimizedReplacement(JITType typeToReplace) |
1704 | { |
1705 | CodeBlock* replacement = this->replacement(); |
1706 | return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace); |
1707 | } |
1708 | |
1709 | bool CodeBlock::hasOptimizedReplacement() |
1710 | { |
1711 | return hasOptimizedReplacement(jitType()); |
1712 | } |
1713 | #endif |
1714 | |
1715 | HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler requiredHandler) |
1716 | { |
1717 | RELEASE_ASSERT(bytecodeOffset < instructions().size()); |
1718 | return handlerForIndex(bytecodeOffset, requiredHandler); |
1719 | } |
1720 | |
1721 | HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler) |
1722 | { |
1723 | if (!m_rareData) |
return nullptr;
1725 | return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler); |
1726 | } |
1727 | |
1728 | DisposableCallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite) |
1729 | { |
1730 | #if ENABLE(DFG_JIT) |
1731 | RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType())); |
1732 | RELEASE_ASSERT(canGetCodeOrigin(originalCallSite)); |
1733 | ASSERT(!!handlerForIndex(originalCallSite.bits())); |
1734 | CodeOrigin originalOrigin = codeOrigin(originalCallSite); |
1735 | return m_jitCode->dfgCommon()->addDisposableCallSiteIndex(originalOrigin); |
1736 | #else |
1737 | // We never create new on-the-fly exception handling |
1738 | // call sites outside the DFG/FTL inline caches. |
1739 | UNUSED_PARAM(originalCallSite); |
1740 | RELEASE_ASSERT_NOT_REACHED(); |
1741 | return DisposableCallSiteIndex(0u); |
1742 | #endif |
1743 | } |
1744 | |
1747 | void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset) |
1748 | { |
1749 | auto& instruction = instructions().at(bytecodeOffset); |
1750 | OpCatch op = instruction->as<OpCatch>(); |
1751 | auto& metadata = op.metadata(this); |
1752 | if (!!metadata.m_buffer) { |
1753 | #if !ASSERT_DISABLED |
1754 | ConcurrentJSLocker locker(m_lock); |
1755 | bool found = false; |
1756 | auto* rareData = m_rareData.get(); |
1757 | ASSERT(rareData); |
1758 | for (auto& profile : rareData->m_catchProfiles) { |
1759 | if (profile.get() == metadata.m_buffer) { |
1760 | found = true; |
1761 | break; |
1762 | } |
1763 | } |
1764 | ASSERT(found); |
1765 | #endif |
1766 | return; |
1767 | } |
1768 | |
1769 | ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset); |
1770 | } |
1771 | |
1772 | void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset) |
1773 | { |
1774 | BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis(); |
1775 | |
1776 | // We get the live-out set of variables at op_catch, not the live-in. This |
1777 | // is because the variables that the op_catch defines might be dead, and |
1778 | // we can avoid profiling them and extracting them when doing OSR entry |
1779 | // into the DFG. |
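// For example, in `try { ... } catch (e) { return e; }`, only `e` (plus the
// arguments) is live after the op_catch, so those are the only values worth
// profiling here.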
1780 | |
1781 | auto nextOffset = instructions().at(bytecodeOffset).next().offset(); |
1782 | FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, nextOffset); |
1783 | Vector<VirtualRegister> liveOperands; |
1784 | liveOperands.reserveInitialCapacity(liveLocals.bitCount()); |
1785 | liveLocals.forEachSetBit([&] (unsigned liveLocal) { |
1786 | liveOperands.append(virtualRegisterForLocal(liveLocal)); |
1787 | }); |
1788 | |
1789 | for (int i = 0; i < numParameters(); ++i) |
1790 | liveOperands.append(virtualRegisterForArgument(i)); |
1791 | |
1792 | auto profiles = std::make_unique<ValueProfileAndOperandBuffer>(liveOperands.size()); |
1793 | RELEASE_ASSERT(profiles->m_size == liveOperands.size()); |
1794 | for (unsigned i = 0; i < profiles->m_size; ++i) |
1795 | profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset(); |
1796 | |
1797 | createRareDataIfNecessary(); |
1798 | |
1799 | // The compiler thread will read this pointer value and then proceed to dereference it |
1800 | // if it is not null. We need to make sure all above stores happen before this store so |
1801 | // the compiler thread reads fully initialized data. |
1802 | WTF::storeStoreFence(); |
1803 | |
1804 | op.metadata(this).m_buffer = profiles.get(); |
1805 | { |
1806 | ConcurrentJSLocker locker(m_lock); |
1807 | m_rareData->m_catchProfiles.append(WTFMove(profiles)); |
1808 | } |
1809 | } |
1810 | |
1811 | void CodeBlock::removeExceptionHandlerForCallSite(DisposableCallSiteIndex callSiteIndex) |
1812 | { |
1813 | RELEASE_ASSERT(m_rareData); |
1814 | Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers; |
1815 | unsigned index = callSiteIndex.bits(); |
1816 | for (size_t i = 0; i < exceptionHandlers.size(); ++i) { |
1817 | HandlerInfo& handler = exceptionHandlers[i]; |
1818 | if (handler.start <= index && handler.end > index) { |
1819 | exceptionHandlers.remove(i); |
1820 | return; |
1821 | } |
1822 | } |
1823 | |
1824 | RELEASE_ASSERT_NOT_REACHED(); |
1825 | } |
1826 | |
1827 | unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset) |
1828 | { |
1829 | RELEASE_ASSERT(bytecodeOffset < instructions().size()); |
1830 | return ownerExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset); |
1831 | } |
1832 | |
1833 | unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset) |
1834 | { |
1835 | int divot; |
1836 | int startOffset; |
1837 | int endOffset; |
1838 | unsigned line; |
1839 | unsigned column; |
1840 | expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column); |
1841 | return column; |
1842 | } |
1843 | |
1844 | void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const |
1845 | { |
1846 | m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column); |
1847 | divot += sourceOffset(); |
1848 | column += line ? 1 : firstLineColumnOffset(); |
1849 | line += ownerExecutable()->firstLine(); |
1850 | } |
1851 | |
1852 | bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column) |
1853 | { |
1854 | const InstructionStream& instructionStream = instructions(); |
1855 | for (const auto& it : instructionStream) { |
1856 | if (it->is<OpDebug>()) { |
1857 | int unused; |
1858 | unsigned opDebugLine; |
1859 | unsigned opDebugColumn; |
1860 | expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn); |
1861 | if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn)) |
1862 | return true; |
1863 | } |
1864 | } |
1865 | return false; |
1866 | } |
1867 | |
1868 | void CodeBlock::shrinkToFit(ShrinkMode shrinkMode) |
1869 | { |
1870 | ConcurrentJSLocker locker(m_lock); |
1871 | |
1872 | #if ENABLE(JIT) |
1873 | if (auto* jitData = m_jitData.get()) |
1874 | jitData->m_rareCaseProfiles.shrinkToFit(); |
1875 | #endif |
1876 | |
1877 | if (shrinkMode == EarlyShrink) { |
1878 | m_constantRegisters.shrinkToFit(); |
1879 | m_constantsSourceCodeRepresentation.shrinkToFit(); |
1880 | |
1881 | if (m_rareData) { |
1882 | m_rareData->m_switchJumpTables.shrinkToFit(); |
1883 | m_rareData->m_stringSwitchJumpTables.shrinkToFit(); |
1884 | } |
} // else don't shrink these, because pointers into these tables may already have been handed out.
1886 | } |
1887 | |
1888 | #if ENABLE(JIT) |
1889 | void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming) |
1890 | { |
1891 | noticeIncomingCall(callerFrame); |
1892 | ConcurrentJSLocker locker(m_lock); |
1893 | ensureJITData(locker).m_incomingCalls.push(incoming); |
1894 | } |
1895 | |
1896 | void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming) |
1897 | { |
1898 | noticeIncomingCall(callerFrame); |
1899 | { |
1900 | ConcurrentJSLocker locker(m_lock); |
1901 | ensureJITData(locker).m_incomingPolymorphicCalls.push(incoming); |
1902 | } |
1903 | } |
1904 | #endif // ENABLE(JIT) |
1905 | |
1906 | void CodeBlock::unlinkIncomingCalls() |
1907 | { |
1908 | while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end()) |
1909 | m_incomingLLIntCalls.begin()->unlink(); |
1910 | #if ENABLE(JIT) |
1911 | JITData* jitData = nullptr; |
1912 | { |
1913 | ConcurrentJSLocker locker(m_lock); |
1914 | jitData = m_jitData.get(); |
1915 | } |
1916 | if (jitData) { |
1917 | while (jitData->m_incomingCalls.begin() != jitData->m_incomingCalls.end()) |
1918 | jitData->m_incomingCalls.begin()->unlink(*vm()); |
1919 | while (jitData->m_incomingPolymorphicCalls.begin() != jitData->m_incomingPolymorphicCalls.end()) |
1920 | jitData->m_incomingPolymorphicCalls.begin()->unlink(*vm()); |
1921 | } |
1922 | #endif // ENABLE(JIT) |
1923 | } |
1924 | |
1925 | void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming) |
1926 | { |
1927 | noticeIncomingCall(callerFrame); |
1928 | m_incomingLLIntCalls.push(incoming); |
1929 | } |
1930 | |
1931 | CodeBlock* CodeBlock::newReplacement() |
1932 | { |
1933 | return ownerExecutable()->newReplacementCodeBlockFor(specializationKind()); |
1934 | } |
1935 | |
1936 | #if ENABLE(JIT) |
1937 | CodeBlock* CodeBlock::replacement() |
1938 | { |
1939 | const ClassInfo* classInfo = this->classInfo(*vm()); |
1940 | |
1941 | if (classInfo == FunctionCodeBlock::info()) |
1942 | return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(isConstructor() ? CodeForConstruct : CodeForCall); |
1943 | |
1944 | if (classInfo == EvalCodeBlock::info()) |
1945 | return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock(); |
1946 | |
1947 | if (classInfo == ProgramCodeBlock::info()) |
1948 | return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock(); |
1949 | |
1950 | if (classInfo == ModuleProgramCodeBlock::info()) |
1951 | return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock(); |
1952 | |
1953 | RELEASE_ASSERT_NOT_REACHED(); |
1954 | return nullptr; |
1955 | } |
1956 | |
1957 | DFG::CapabilityLevel CodeBlock::computeCapabilityLevel() |
1958 | { |
1959 | const ClassInfo* classInfo = this->classInfo(*vm()); |
1960 | |
1961 | if (classInfo == FunctionCodeBlock::info()) { |
1962 | if (isConstructor()) |
1963 | return DFG::functionForConstructCapabilityLevel(this); |
1964 | return DFG::functionForCallCapabilityLevel(this); |
1965 | } |
1966 | |
1967 | if (classInfo == EvalCodeBlock::info()) |
1968 | return DFG::evalCapabilityLevel(this); |
1969 | |
1970 | if (classInfo == ProgramCodeBlock::info()) |
1971 | return DFG::programCapabilityLevel(this); |
1972 | |
1973 | if (classInfo == ModuleProgramCodeBlock::info()) |
1974 | return DFG::programCapabilityLevel(this); |
1975 | |
1976 | RELEASE_ASSERT_NOT_REACHED(); |
1977 | return DFG::CannotCompile; |
1978 | } |
1979 | |
1980 | #endif // ENABLE(JIT) |
1981 | |
1982 | void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail) |
1983 | { |
1984 | #if !ENABLE(DFG_JIT) |
1985 | UNUSED_PARAM(mode); |
1986 | UNUSED_PARAM(detail); |
1987 | #endif |
1988 | |
1989 | VM& vm = *m_vm; |
1990 | |
1991 | CODEBLOCK_LOG_EVENT(this, "jettison" , ("due to " , reason, ", counting = " , mode == CountReoptimization, ", detail = " , pointerDump(detail))); |
1992 | |
1993 | RELEASE_ASSERT(reason != Profiler::NotJettisoned); |
1994 | |
1995 | #if ENABLE(DFG_JIT) |
1996 | if (DFG::shouldDumpDisassembly()) { |
1997 | dataLog("Jettisoning " , *this); |
1998 | if (mode == CountReoptimization) |
1999 | dataLog(" and counting reoptimization" ); |
2000 | dataLog(" due to " , reason); |
2001 | if (detail) |
2002 | dataLog(", " , *detail); |
2003 | dataLog(".\n" ); |
2004 | } |
2005 | |
2006 | if (reason == Profiler::JettisonDueToWeakReference) { |
2007 | if (DFG::shouldDumpDisassembly()) { |
2008 | dataLog(*this, " will be jettisoned because of the following dead references:\n" ); |
2009 | DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); |
2010 | for (auto& transition : dfgCommon->transitions) { |
2011 | JSCell* origin = transition.m_codeOrigin.get(); |
2012 | JSCell* from = transition.m_from.get(); |
2013 | JSCell* to = transition.m_to.get(); |
2014 | if ((!origin || vm.heap.isMarked(origin)) && vm.heap.isMarked(from)) |
2015 | continue; |
2016 | dataLog(" Transition under " , RawPointer(origin), ", " , RawPointer(from), " -> " , RawPointer(to), ".\n" ); |
2017 | } |
2018 | for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) { |
2019 | JSCell* weak = dfgCommon->weakReferences[i].get(); |
2020 | if (vm.heap.isMarked(weak)) |
2021 | continue; |
2022 | dataLog(" Weak reference " , RawPointer(weak), ".\n" ); |
2023 | } |
2024 | } |
2025 | } |
2026 | #endif // ENABLE(DFG_JIT) |
2027 | |
2028 | DeferGCForAWhile deferGC(*heap()); |
2029 | |
2030 | // We want to accomplish two things here: |
2031 | // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it |
2032 | // we should OSR exit at the top of the next bytecode instruction after the return. |
2033 | // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock. |
2034 | |
2035 | #if ENABLE(DFG_JIT) |
2036 | if (JITCode::isOptimizingJIT(jitType())) |
2037 | jitCode()->dfgCommon()->clearWatchpoints(); |
2038 | |
2039 | if (reason != Profiler::JettisonDueToOldAge) { |
2040 | Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get(); |
2041 | if (UNLIKELY(compilation)) |
2042 | compilation->setJettisonReason(reason, detail); |
2043 | |
2044 | // This accomplishes (1), and does its own book-keeping about whether it has already happened. |
2045 | if (!jitCode()->dfgCommon()->invalidate()) { |
2046 | // We've already been invalidated. |
2047 | RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable()))); |
2048 | return; |
2049 | } |
2050 | } |
2051 | |
2052 | if (DFG::shouldDumpDisassembly()) |
2053 | dataLog(" Did invalidate " , *this, "\n" ); |
2054 | |
2055 | // Count the reoptimization if that's what the user wanted. |
2056 | if (mode == CountReoptimization) { |
2057 | // FIXME: Maybe this should call alternative(). |
2058 | // https://bugs.webkit.org/show_bug.cgi?id=123677 |
2059 | baselineAlternative()->countReoptimization(); |
2060 | if (DFG::shouldDumpDisassembly()) |
2061 | dataLog(" Did count reoptimization for " , *this, "\n" ); |
2062 | } |
2063 | |
2064 | if (this != replacement()) { |
2065 | // This means that we were never the entrypoint. This can happen for OSR entry code |
2066 | // blocks. |
2067 | return; |
2068 | } |
2069 | |
2070 | if (alternative()) |
2071 | alternative()->optimizeAfterWarmUp(); |
2072 | |
2073 | if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps) |
2074 | tallyFrequentExitSites(); |
2075 | #endif // ENABLE(DFG_JIT) |
2076 | |
2077 | // Jettison can happen during GC. We don't want to install code to a dead executable |
2078 | // because that would add a dead object to the remembered set. |
2079 | if (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable())) |
2080 | return; |
2081 | |
2082 | #if ENABLE(JIT) |
2083 | { |
2084 | ConcurrentJSLocker locker(m_lock); |
2085 | if (JITData* jitData = m_jitData.get()) { |
2086 | for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) |
2087 | callLinkInfo->setClearedByJettison(); |
2088 | } |
2089 | } |
2090 | #endif |
2091 | |
2092 | // This accomplishes (2). |
2093 | ownerExecutable()->installCode(vm, alternative(), codeType(), specializationKind()); |
2094 | |
2095 | #if ENABLE(DFG_JIT) |
2096 | if (DFG::shouldDumpDisassembly()) |
2097 | dataLog(" Did install baseline version of " , *this, "\n" ); |
2098 | #endif // ENABLE(DFG_JIT) |
2099 | } |
2100 | |
2101 | JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin) |
2102 | { |
2103 | auto* inlineCallFrame = codeOrigin.inlineCallFrame(); |
2104 | if (!inlineCallFrame) |
2105 | return globalObject(); |
2106 | return inlineCallFrame->baselineCodeBlock->globalObject(); |
2107 | } |
2108 | |
2109 | class RecursionCheckFunctor { |
2110 | public: |
2111 | RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck) |
2112 | : m_startCallFrame(startCallFrame) |
2113 | , m_codeBlock(codeBlock) |
2114 | , m_depthToCheck(depthToCheck) |
2115 | , m_foundStartCallFrame(false) |
2116 | , m_didRecurse(false) |
2117 | { } |
2118 | |
2119 | StackVisitor::Status operator()(StackVisitor& visitor) const |
2120 | { |
2121 | CallFrame* currentCallFrame = visitor->callFrame(); |
2122 | |
2123 | if (currentCallFrame == m_startCallFrame) |
2124 | m_foundStartCallFrame = true; |
2125 | |
2126 | if (m_foundStartCallFrame) { |
2127 | if (visitor->callFrame()->codeBlock() == m_codeBlock) { |
2128 | m_didRecurse = true; |
2129 | return StackVisitor::Done; |
2130 | } |
2131 | |
2132 | if (!m_depthToCheck--) |
2133 | return StackVisitor::Done; |
2134 | } |
2135 | |
2136 | return StackVisitor::Continue; |
2137 | } |
2138 | |
2139 | bool didRecurse() const { return m_didRecurse; } |
2140 | |
2141 | private: |
2142 | CallFrame* m_startCallFrame; |
2143 | CodeBlock* m_codeBlock; |
2144 | mutable unsigned m_depthToCheck; |
2145 | mutable bool m_foundStartCallFrame; |
2146 | mutable bool m_didRecurse; |
2147 | }; |
2148 | |
2149 | void CodeBlock::noticeIncomingCall(ExecState* callerFrame) |
2150 | { |
2151 | CodeBlock* callerCodeBlock = callerFrame->codeBlock(); |
2152 | |
2153 | if (Options::verboseCallLink()) |
2154 | dataLog("Noticing call link from " , pointerDump(callerCodeBlock), " to " , *this, "\n" ); |
2155 | |
2156 | #if ENABLE(DFG_JIT) |
2157 | if (!m_shouldAlwaysBeInlined) |
2158 | return; |
2159 | |
2160 | if (!callerCodeBlock) { |
2161 | m_shouldAlwaysBeInlined = false; |
2162 | if (Options::verboseCallLink()) |
2163 | dataLog(" Clearing SABI because caller is native.\n" ); |
2164 | return; |
2165 | } |
2166 | |
2167 | if (!hasBaselineJITProfiling()) |
2168 | return; |
2169 | |
2170 | if (!DFG::mightInlineFunction(this)) |
2171 | return; |
2172 | |
2173 | if (!canInline(capabilityLevelState())) |
2174 | return; |
2175 | |
2176 | if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) { |
2177 | m_shouldAlwaysBeInlined = false; |
2178 | if (Options::verboseCallLink()) |
2179 | dataLog(" Clearing SABI because caller is too large.\n" ); |
2180 | return; |
2181 | } |
2182 | |
2183 | if (callerCodeBlock->jitType() == JITType::InterpreterThunk) { |
2184 | // If the caller is still in the interpreter, then we can't expect inlining to |
2185 | // happen anytime soon. Assume it's profitable to optimize it separately. This |
2186 | // ensures that a function is SABI only if it is called no more frequently than |
2187 | // any of its callers. |
2188 | m_shouldAlwaysBeInlined = false; |
2189 | if (Options::verboseCallLink()) |
2190 | dataLog(" Clearing SABI because caller is in LLInt.\n" ); |
2191 | return; |
2192 | } |
2193 | |
2194 | if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) { |
2195 | m_shouldAlwaysBeInlined = false; |
2196 | if (Options::verboseCallLink()) |
2197 | dataLog(" Clearing SABI bcause caller was already optimized.\n" ); |
2198 | return; |
2199 | } |
2200 | |
2201 | if (callerCodeBlock->codeType() != FunctionCode) { |
// If the caller is either eval or global code, assume that it won't be
2203 | // optimized anytime soon. For eval code this is particularly true since we |
2204 | // delay eval optimization by a *lot*. |
2205 | m_shouldAlwaysBeInlined = false; |
2206 | if (Options::verboseCallLink()) |
2207 | dataLog(" Clearing SABI because caller is not a function.\n" ); |
2208 | return; |
2209 | } |
2210 | |
2211 | // Recursive calls won't be inlined. |
2212 | RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth()); |
2213 | vm()->topCallFrame->iterate(functor); |
2214 | |
2215 | if (functor.didRecurse()) { |
2216 | if (Options::verboseCallLink()) |
2217 | dataLog(" Clearing SABI because recursion was detected.\n" ); |
2218 | m_shouldAlwaysBeInlined = false; |
2219 | return; |
2220 | } |
2221 | |
2222 | if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) { |
2223 | dataLog("In call from " , FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to " , *this, ": caller's DFG capability level is not set.\n" ); |
2224 | CRASH(); |
2225 | } |
2226 | |
2227 | if (canCompile(callerCodeBlock->capabilityLevelState())) |
2228 | return; |
2229 | |
2230 | if (Options::verboseCallLink()) |
2231 | dataLog(" Clearing SABI because the caller is not a DFG candidate.\n" ); |
2232 | |
2233 | m_shouldAlwaysBeInlined = false; |
2234 | #endif |
2235 | } |
2236 | |
2237 | unsigned CodeBlock::reoptimizationRetryCounter() const |
2238 | { |
2239 | #if ENABLE(JIT) |
2240 | ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax()); |
2241 | return m_reoptimizationRetryCounter; |
2242 | #else |
2243 | return 0; |
2244 | #endif // ENABLE(JIT) |
2245 | } |
2246 | |
2247 | #if !ENABLE(C_LOOP) |
2248 | const RegisterAtOffsetList* CodeBlock::calleeSaveRegisters() const |
2249 | { |
2250 | #if ENABLE(JIT) |
2251 | if (auto* jitData = m_jitData.get()) { |
2252 | if (const RegisterAtOffsetList* registers = jitData->m_calleeSaveRegisters.get()) |
2253 | return registers; |
2254 | } |
2255 | #endif |
2256 | return &RegisterAtOffsetList::llintBaselineCalleeSaveRegisters(); |
2257 | } |
2258 | |
2259 | |
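// Round a count of callee-save machine registers up to whole VirtualRegister
// slots. On targets where sizeof(CPURegister) == sizeof(Register) this is the
// identity; on 32-bit targets two CPURegisters pack into one Register, so it
// computes ceil(count / 2).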
static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
{
return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * sizeof(CPURegister)) / sizeof(Register));
}
2266 | |
2267 | size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() |
2268 | { |
2269 | return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters()); |
2270 | } |
2271 | |
2272 | size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters() |
2273 | { |
2274 | return roundCalleeSaveSpaceAsVirtualRegisters(calleeSaveRegisters()->size()); |
2275 | } |
2276 | #endif |
2277 | |
2278 | #if ENABLE(JIT) |
2279 | |
2280 | void CodeBlock::countReoptimization() |
2281 | { |
2282 | m_reoptimizationRetryCounter++; |
2283 | if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax()) |
2284 | m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax(); |
2285 | } |
2286 | |
2287 | unsigned CodeBlock::numberOfDFGCompiles() |
2288 | { |
2289 | ASSERT(JITCode::isBaselineCode(jitType())); |
2290 | if (Options::testTheFTL()) { |
2291 | if (m_didFailFTLCompilation) |
2292 | return 1000000; |
2293 | return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter; |
2294 | } |
2295 | CodeBlock* replacement = this->replacement(); |
2296 | return ((replacement && JITCode::isOptimizingJIT(replacement->jitType())) ? 1 : 0) + m_reoptimizationRetryCounter; |
2297 | } |
2298 | |
2299 | int32_t CodeBlock::codeTypeThresholdMultiplier() const |
2300 | { |
2301 | if (codeType() == EvalCode) |
2302 | return Options::evalThresholdMultiplier(); |
2303 | |
2304 | return 1; |
2305 | } |
2306 | |
2307 | double CodeBlock::optimizationThresholdScalingFactor() |
2308 | { |
2309 | // This expression arises from doing a least-squares fit of |
2310 | // |
// F[x_] := a * Sqrt[x + b] + Abs[c * x] + d
2312 | // |
2313 | // against the data points: |
2314 | // |
// x      F[x_]
// 10     0.9  (smallest reasonable code block)
// 200    1.0  (typical small-ish code block)
// 320    1.2  (something I saw in 3d-cube that I wanted to optimize)
// 1268   5.0  (something I saw in 3d-cube that I didn't want to optimize)
// 4000   5.5  (random large size, used to cause the function to converge to a shallow curve of some sort)
// 10000  6.0  (similar to above)
2322 | // |
2323 | // I achieve the minimization using the following Mathematica code: |
2324 | // |
2325 | // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d |
2326 | // |
2327 | // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}} |
2328 | // |
2329 | // solution = |
2330 | // Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples), |
2331 | // {a, b, c, d}][[2]] |
2332 | // |
2333 | // And the code below (to initialize a, b, c, d) is generated by: |
2334 | // |
2335 | // Print["const double " <> ToString[#[[1]]] <> " = " <> |
2336 | // If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution |
2337 | // |
2338 | // We've long known the following to be true: |
2339 | // - Small code blocks are cheap to optimize and so we should do it sooner rather |
2340 | // than later. |
2341 | // - Large code blocks are expensive to optimize and so we should postpone doing so, |
2342 | // and sometimes have a large enough threshold that we never optimize them. |
2343 | // - The difference in cost is not totally linear because (a) just invoking the |
2344 | // DFG incurs some base cost and (b) for large code blocks there is enough slop |
2345 | // in the correlation between instruction count and the actual compilation cost |
2346 | // that for those large blocks, the instruction count should not have a strong |
2347 | // influence on our threshold. |
2348 | // |
// I knew the goals but I didn't know how to achieve them, so I picked an interesting
2350 | // example where the heuristics were right (code block in 3d-cube with instruction |
2351 | // count 320, which got compiled early as it should have been) and one where they were |
2352 | // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive |
2353 | // to compile and didn't run often enough to warrant compilation in my opinion), and |
2354 | // then threw in additional data points that represented my own guess of what our |
2355 | // heuristics should do for some round-numbered examples. |
2356 | // |
2357 | // The expression to which I decided to fit the data arose because I started with an |
2358 | // affine function, and then did two things: put the linear part in an Abs to ensure |
2359 | // that the fit didn't end up choosing a negative value of c (which would result in |
2360 | // the function turning over and going negative for large x) and I threw in a Sqrt |
// term because Sqrt represents my intuition that the function should be more sensitive
2362 | // to small changes in small values of x, but less sensitive when x gets large. |
2363 | |
2364 | // Note that the current fit essentially eliminates the linear portion of the |
2365 | // expression (c == 0.0). |
2366 | const double a = 0.061504; |
2367 | const double b = 1.02406; |
2368 | const double c = 0.0; |
2369 | const double d = 0.825914; |
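// For a rough sense of scale with these constants: F[200] is about
// 0.826 + 0.0615 * Sqrt[201], i.e. roughly 1.7, while F[10000] comes out to
// roughly 7, so large blocks need several times the base execution count
// before we consider optimizing them.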
2370 | |
2371 | double bytecodeCost = this->bytecodeCost(); |
2372 | |
2373 | ASSERT(bytecodeCost); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense. |
2374 | |
2375 | double result = d + a * sqrt(bytecodeCost + b) + c * bytecodeCost; |
2376 | |
2377 | result *= codeTypeThresholdMultiplier(); |
2378 | |
2379 | if (Options::verboseOSR()) { |
dataLog(
*this, ": bytecode cost is ", bytecodeCost,
", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
"\n");
2384 | } |
2385 | return result; |
2386 | } |
2387 | |
2388 | static int32_t clipThreshold(double threshold) |
2389 | { |
2390 | if (threshold < 1.0) |
2391 | return 1; |
2392 | |
2393 | if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max())) |
2394 | return std::numeric_limits<int32_t>::max(); |
2395 | |
2396 | return static_cast<int32_t>(threshold); |
2397 | } |
2398 | |
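// The effective threshold is the caller's desired value scaled by the
// code-size-based factor above and doubled once per reoptimization retry,
// then clamped to [1, INT32_MAX] by clipThreshold().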
2399 | int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold) |
2400 | { |
2401 | return clipThreshold( |
2402 | static_cast<double>(desiredThreshold) * |
2403 | optimizationThresholdScalingFactor() * |
2404 | (1 << reoptimizationRetryCounter())); |
2405 | } |
2406 | |
2407 | bool CodeBlock::checkIfOptimizationThresholdReached() |
2408 | { |
2409 | #if ENABLE(DFG_JIT) |
2410 | if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) { |
2411 | if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode)) |
2412 | == DFG::Worklist::Compiled) { |
2413 | optimizeNextInvocation(); |
2414 | return true; |
2415 | } |
2416 | } |
2417 | #endif |
2418 | |
2419 | return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this); |
2420 | } |
2421 | |
2422 | #if ENABLE(DFG_JIT) |
2423 | auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction |
2424 | { |
2425 | DFG::OSRExitBase& exit = exitState.exit; |
2426 | if (!exitKindMayJettison(exit.m_kind)) { |
2427 | // FIXME: We may want to notice that we're frequently exiting |
2428 | // at an op_catch that we didn't compile an entrypoint for, and |
2429 | // then trigger a reoptimization of this CodeBlock: |
2430 | // https://bugs.webkit.org/show_bug.cgi?id=175842 |
2431 | return OptimizeAction::None; |
2432 | } |
2433 | |
2434 | exit.m_count++; |
2435 | m_osrExitCounter++; |
2436 | |
2437 | CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock; |
2438 | ASSERT(baselineCodeBlock == baselineAlternative()); |
2439 | if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold())) |
2440 | return OptimizeAction::ReoptimizeNow; |
2441 | |
2442 | // We want to figure out if there's a possibility that we're in a loop. For the outermost |
2443 | // code block in the inline stack, we handle this appropriately by having the loop OSR trigger |
2444 | // check the exit count of the replacement of the CodeBlock from which we are OSRing. The |
2445 | // problem is the inlined functions, which might also have loops, but whose baseline versions |
2446 | // don't know where to look for the exit count. Figure out if those loops are severe enough |
2447 | // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger. |
2448 | // Otherwise, we should use the normal reoptimization trigger. |
2449 | |
2450 | bool didTryToEnterInLoop = false; |
2451 | for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame(); inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame()) { |
2452 | if (inlineCallFrame->baselineCodeBlock->ownerExecutable()->didTryToEnterInLoop()) { |
2453 | didTryToEnterInLoop = true; |
2454 | break; |
2455 | } |
2456 | } |
2457 | |
2458 | uint32_t exitCountThreshold = didTryToEnterInLoop |
2459 | ? exitCountThresholdForReoptimizationFromLoop() |
2460 | : exitCountThresholdForReoptimization(); |
2461 | |
2462 | if (m_osrExitCounter > exitCountThreshold) |
2463 | return OptimizeAction::ReoptimizeNow; |
2464 | |
// Too few failures so far. Adjust the execution counter such that the target is to only optimize after a while.
2466 | baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold); |
2467 | return OptimizeAction::None; |
2468 | } |
2469 | #endif |
2470 | |
2471 | void CodeBlock::optimizeNextInvocation() |
2472 | { |
2473 | if (Options::verboseOSR()) |
2474 | dataLog(*this, ": Optimizing next invocation.\n" ); |
2475 | m_jitExecuteCounter.setNewThreshold(0, this); |
2476 | } |
2477 | |
2478 | void CodeBlock::dontOptimizeAnytimeSoon() |
2479 | { |
2480 | if (Options::verboseOSR()) |
2481 | dataLog(*this, ": Not optimizing anytime soon.\n" ); |
2482 | m_jitExecuteCounter.deferIndefinitely(); |
2483 | } |
2484 | |
2485 | void CodeBlock::optimizeAfterWarmUp() |
2486 | { |
2487 | if (Options::verboseOSR()) |
2488 | dataLog(*this, ": Optimizing after warm-up.\n" ); |
2489 | #if ENABLE(DFG_JIT) |
2490 | m_jitExecuteCounter.setNewThreshold( |
2491 | adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this); |
2492 | #endif |
2493 | } |
2494 | |
2495 | void CodeBlock::optimizeAfterLongWarmUp() |
2496 | { |
2497 | if (Options::verboseOSR()) |
2498 | dataLog(*this, ": Optimizing after long warm-up.\n" ); |
2499 | #if ENABLE(DFG_JIT) |
2500 | m_jitExecuteCounter.setNewThreshold( |
2501 | adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this); |
2502 | #endif |
2503 | } |
2504 | |
2505 | void CodeBlock::optimizeSoon() |
2506 | { |
2507 | if (Options::verboseOSR()) |
2508 | dataLog(*this, ": Optimizing soon.\n" ); |
2509 | #if ENABLE(DFG_JIT) |
2510 | m_jitExecuteCounter.setNewThreshold( |
2511 | adjustedCounterValue(Options::thresholdForOptimizeSoon()), this); |
2512 | #endif |
2513 | } |
2514 | |
2515 | void CodeBlock::forceOptimizationSlowPathConcurrently() |
2516 | { |
2517 | if (Options::verboseOSR()) |
2518 | dataLog(*this, ": Forcing slow path concurrently.\n" ); |
2519 | m_jitExecuteCounter.forceSlowPathConcurrently(); |
2520 | } |
2521 | |
2522 | #if ENABLE(DFG_JIT) |
2523 | void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result) |
2524 | { |
2525 | JITType type = jitType(); |
2526 | if (type != JITType::BaselineJIT) { |
2527 | dataLog(*this, ": expected to have baseline code but have " , type, "\n" ); |
2528 | CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), static_cast<uint8_t>(type)); |
2529 | } |
2530 | |
2531 | CodeBlock* replacement = this->replacement(); |
2532 | bool hasReplacement = (replacement && replacement != this); |
2533 | if ((result == CompilationSuccessful) != hasReplacement) { |
2534 | dataLog(*this, ": we have result = " , result, " but " ); |
2535 | if (replacement == this) |
2536 | dataLog("we are our own replacement.\n" ); |
2537 | else |
2538 | dataLog("our replacement is " , pointerDump(replacement), "\n" ); |
2539 | RELEASE_ASSERT_NOT_REACHED(); |
2540 | } |
2541 | |
2542 | switch (result) { |
2543 | case CompilationSuccessful: |
2544 | RELEASE_ASSERT(replacement && JITCode::isOptimizingJIT(replacement->jitType())); |
2545 | optimizeNextInvocation(); |
2546 | return; |
2547 | case CompilationFailed: |
2548 | dontOptimizeAnytimeSoon(); |
2549 | return; |
2550 | case CompilationDeferred: |
2551 | // We'd like to do dontOptimizeAnytimeSoon() but we cannot because |
2552 | // forceOptimizationSlowPathConcurrently() is inherently racy. It won't |
2553 | // necessarily guarantee anything. So, we make sure that even if that |
2554 | // function ends up being a no-op, we still eventually retry and realize |
2555 | // that we have optimized code ready. |
2556 | optimizeAfterWarmUp(); |
2557 | return; |
2558 | case CompilationInvalidated: |
2559 | // Retry with exponential backoff. |
2560 | countReoptimization(); |
2561 | optimizeAfterWarmUp(); |
2562 | return; |
2563 | } |
2564 | |
2565 | dataLog("Unrecognized result: " , static_cast<int>(result), "\n" ); |
2566 | RELEASE_ASSERT_NOT_REACHED(); |
2567 | } |
2568 | |
2569 | #endif |
2570 | |
2571 | uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold) |
2572 | { |
2573 | ASSERT(JITCode::isOptimizingJIT(jitType())); |
2574 | // Compute this the lame way so we don't saturate. This is called infrequently |
2575 | // enough that this loop won't hurt us. |
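// For example, a desired threshold of 100 with a baseline reoptimization
// retry counter of 2 yields 400; if a doubling would wrap around, we clamp
// to UINT32_MAX instead.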
2576 | unsigned result = desiredThreshold; |
2577 | for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) { |
2578 | unsigned newResult = result << 1; |
2579 | if (newResult < result) |
2580 | return std::numeric_limits<uint32_t>::max(); |
2581 | result = newResult; |
2582 | } |
2583 | return result; |
2584 | } |
2585 | |
2586 | uint32_t CodeBlock::exitCountThresholdForReoptimization() |
2587 | { |
2588 | return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier()); |
2589 | } |
2590 | |
2591 | uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop() |
2592 | { |
2593 | return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier()); |
2594 | } |
2595 | |
2596 | bool CodeBlock::shouldReoptimizeNow() |
2597 | { |
2598 | return osrExitCounter() >= exitCountThresholdForReoptimization(); |
2599 | } |
2600 | |
2601 | bool CodeBlock::shouldReoptimizeFromLoopNow() |
2602 | { |
2603 | return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop(); |
2604 | } |
2605 | #endif |
2606 | |
2607 | ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset) |
2608 | { |
2609 | auto instruction = instructions().at(bytecodeOffset); |
2610 | switch (instruction->opcodeID()) { |
2611 | #define CASE1(Op) \ |
2612 | case Op::opcodeID: \ |
2613 | return &instruction->as<Op>().metadata(this).m_arrayProfile; |
2614 | |
2615 | #define CASE2(Op) \ |
2616 | case Op::opcodeID: \ |
2617 | return &instruction->as<Op>().metadata(this).m_callLinkInfo.m_arrayProfile; |
2618 | |
2619 | FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE1) |
2620 | FOR_EACH_OPCODE_WITH_LLINT_CALL_LINK_INFO(CASE2) |
2621 | |
2622 | #undef CASE1 |
2623 | #undef CASE2 |
2624 | |
2625 | case OpGetById::opcodeID: { |
2626 | auto bytecode = instruction->as<OpGetById>(); |
2627 | auto& metadata = bytecode.metadata(this); |
2628 | if (metadata.m_modeMetadata.mode == GetByIdMode::ArrayLength) |
2629 | return &metadata.m_modeMetadata.arrayLengthMode.arrayProfile; |
2630 | break; |
2631 | } |
2632 | default: |
2633 | break; |
2634 | } |
2635 | |
2636 | return nullptr; |
2637 | } |
2638 | |
2639 | ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset) |
2640 | { |
2641 | ConcurrentJSLocker locker(m_lock); |
2642 | return getArrayProfile(locker, bytecodeOffset); |
2643 | } |
2644 | |
2645 | #if ENABLE(DFG_JIT) |
2646 | Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins() |
2647 | { |
2648 | return m_jitCode->dfgCommon()->codeOrigins; |
2649 | } |
2650 | |
2651 | size_t CodeBlock::numberOfDFGIdentifiers() const |
2652 | { |
2653 | if (!JITCode::isOptimizingJIT(jitType())) |
2654 | return 0; |
2655 | |
2656 | return m_jitCode->dfgCommon()->dfgIdentifiers.size(); |
2657 | } |
2658 | |
2659 | const Identifier& CodeBlock::identifier(int index) const |
2660 | { |
2661 | size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers(); |
2662 | if (static_cast<unsigned>(index) < unlinkedIdentifiers) |
2663 | return m_unlinkedCode->identifier(index); |
2664 | ASSERT(JITCode::isOptimizingJIT(jitType())); |
2665 | return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers]; |
2666 | } |
2667 | #endif // ENABLE(DFG_JIT) |
2668 | |
2669 | void CodeBlock::updateAllValueProfilePredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles) |
2670 | { |
2671 | ConcurrentJSLocker locker(m_lock); |
2672 | |
2673 | numberOfLiveNonArgumentValueProfiles = 0; |
2674 | numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full. |
2675 | |
2676 | forEachValueProfile([&](ValueProfile& profile, bool isArgument) { |
2677 | unsigned numSamples = profile.totalNumberOfSamples(); |
2678 | static_assert(ValueProfile::numberOfBuckets == 1); |
2679 | if (numSamples > ValueProfile::numberOfBuckets) |
2680 | numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight. |
2681 | numberOfSamplesInProfiles += numSamples; |
2682 | if (isArgument) { |
2683 | profile.computeUpdatedPrediction(locker); |
2684 | return; |
2685 | } |
2686 | if (profile.numberOfSamples() || profile.isSampledBefore()) |
2687 | numberOfLiveNonArgumentValueProfiles++; |
2688 | profile.computeUpdatedPrediction(locker); |
2689 | }); |
2690 | |
2691 | if (auto* rareData = m_rareData.get()) { |
2692 | for (auto& profileBucket : rareData->m_catchProfiles) { |
2693 | profileBucket->forEach([&] (ValueProfileAndOperand& profile) { |
2694 | profile.computeUpdatedPrediction(locker); |
2695 | }); |
2696 | } |
2697 | } |
2698 | |
2699 | #if ENABLE(DFG_JIT) |
2700 | lazyOperandValueProfiles(locker).computeUpdatedPredictions(locker); |
2701 | #endif |
2702 | } |
2703 | |
2704 | void CodeBlock::updateAllValueProfilePredictions() |
2705 | { |
2706 | unsigned ignoredValue1, ignoredValue2; |
2707 | updateAllValueProfilePredictionsAndCountLiveness(ignoredValue1, ignoredValue2); |
2708 | } |
2709 | |
2710 | void CodeBlock::updateAllArrayPredictions() |
2711 | { |
2712 | ConcurrentJSLocker locker(m_lock); |
2713 | |
2714 | forEachArrayProfile([&](ArrayProfile& profile) { |
2715 | profile.computeUpdatedPrediction(locker, this); |
2716 | }); |
2717 | |
2718 | forEachArrayAllocationProfile([&](ArrayAllocationProfile& profile) { |
2719 | profile.updateProfile(); |
2720 | }); |
2721 | } |
2722 | |
2723 | void CodeBlock::updateAllPredictions() |
2724 | { |
2725 | updateAllValueProfilePredictions(); |
2726 | updateAllArrayPredictions(); |
2727 | } |
2728 | |
2729 | bool CodeBlock::shouldOptimizeNow() |
2730 | { |
2731 | if (Options::verboseOSR()) |
2732 | dataLog("Considering optimizing " , *this, "...\n" ); |
2733 | |
2734 | if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay()) |
2735 | return true; |
2736 | |
2737 | updateAllArrayPredictions(); |
2738 | |
2739 | unsigned numberOfLiveNonArgumentValueProfiles; |
2740 | unsigned numberOfSamplesInProfiles; |
2741 | updateAllValueProfilePredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles); |
2742 | |
2743 | if (Options::verboseOSR()) { |
2744 | dataLogF( |
2745 | "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n" , |
2746 | (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles(), |
2747 | numberOfLiveNonArgumentValueProfiles, numberOfNonArgumentValueProfiles(), |
2748 | (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfNonArgumentValueProfiles(), |
2749 | numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfNonArgumentValueProfiles()); |
2750 | } |
2751 | |
2752 | if ((!numberOfNonArgumentValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles() >= Options::desiredProfileLivenessRate()) |
2753 | && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate()) |
2754 | && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay()) |
2755 | return true; |
2756 | |
2757 | ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max()); |
2758 | m_optimizationDelayCounter++; |
2759 | optimizeAfterWarmUp(); |
2760 | return false; |
2761 | } |

#if ENABLE(DFG_JIT)
void CodeBlock::tallyFrequentExitSites()
{
    ASSERT(JITCode::isOptimizingJIT(jitType()));
    ASSERT(alternative()->jitType() == JITType::BaselineJIT);

    CodeBlock* profiledBlock = alternative();

    switch (jitType()) {
    case JITType::DFGJIT: {
        DFG::JITCode* jitCode = m_jitCode->dfg();
        for (auto& exit : jitCode->osrExit)
            exit.considerAddingAsFrequentExitSite(profiledBlock);
        break;
    }

#if ENABLE(FTL_JIT)
    case JITType::FTLJIT: {
        // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
        // vector contains a totally different type, that just so happens to behave like
        // DFG::JITCode::osrExit.
        FTL::JITCode* jitCode = m_jitCode->ftl();
        for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
            FTL::OSRExit& exit = jitCode->osrExit[i];
            exit.considerAddingAsFrequentExitSite(profiledBlock);
        }
        break;
    }
#endif

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
#endif // ENABLE(DFG_JIT)

void CodeBlock::notifyLexicalBindingUpdate()
{
    // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this case should be removed once that is fixed.
    // https://bugs.webkit.org/show_bug.cgi?id=193347
    if (scriptMode() == JSParserScriptMode::Module)
        return;
    JSGlobalObject* globalObject = m_globalObject.get();
    JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(globalObject->globalScope());
    SymbolTable* symbolTable = globalLexicalEnvironment->symbolTable();

    ConcurrentJSLocker locker(m_lock);

    auto isShadowed = [&] (UniquedStringImpl* uid) {
        ConcurrentJSLocker locker(symbolTable->m_lock);
        return symbolTable->contains(locker, uid);
    };

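    // Setting m_globalLexicalBindingEpoch to 0 below effectively marks a resolve site as
    // permanently stale (the global object's epoch presumably never takes the value 0), so
    // the fast-path epoch check for GlobalProperty accesses fails and the slow path
    // re-resolves the now-shadowed binding.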
    const InstructionStream& instructionStream = instructions();
    for (const auto& instruction : instructionStream) {
        OpcodeID opcodeID = instruction->opcodeID();
        switch (opcodeID) {
        case op_resolve_scope: {
            auto bytecode = instruction->as<OpResolveScope>();
            auto& metadata = bytecode.metadata(this);
            ResolveType originalResolveType = metadata.m_resolveType;
            if (originalResolveType == GlobalProperty || originalResolveType == GlobalPropertyWithVarInjectionChecks) {
                const Identifier& ident = identifier(bytecode.m_var);
                if (isShadowed(ident.impl()))
                    metadata.m_globalLexicalBindingEpoch = 0;
                else
                    metadata.m_globalLexicalBindingEpoch = globalObject->globalLexicalBindingEpoch();
            }
            break;
        }
        default:
            break;
        }
    }
}

#if ENABLE(VERBOSE_VALUE_PROFILE)
void CodeBlock::dumpValueProfiles()
{
    dataLog("ValueProfile for ", *this, ":\n");
    forEachValueProfile([](ValueProfile& profile, bool isArgument) {
        if (isArgument)
            dataLogF("   arg: ");
        else
            dataLogF("   bc: ");
        if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
            dataLogF("<empty>\n");
            return; // We are iterating via a lambda, not a loop, so bail out of this profile with return, not continue.
        }
        profile.dump(WTF::dataFile());
        dataLogF("\n");
    });
    dataLog("RareCaseProfile for ", *this, ":\n");
    if (auto* jitData = m_jitData.get()) {
        for (RareCaseProfile* profile : jitData->m_rareCaseProfiles)
            dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
    }
}
#endif // ENABLE(VERBOSE_VALUE_PROFILE)

unsigned CodeBlock::frameRegisterCount()
{
    switch (jitType()) {
    case JITType::InterpreterThunk:
        return LLInt::frameRegisterCountFor(this);

#if ENABLE(JIT)
    case JITType::BaselineJIT:
        return JIT::frameRegisterCountFor(this);
#endif // ENABLE(JIT)

#if ENABLE(DFG_JIT)
    case JITType::DFGJIT:
    case JITType::FTLJIT:
        return jitCode()->dfgCommon()->frameRegisterCount;
#endif // ENABLE(DFG_JIT)

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return 0;
    }
}

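// The stack pointer sits just past the deepest callee local. Assuming the usual
// localToOperand(local) == -1 - local encoding of VirtualRegister (an assumption for
// illustration, not something this file spells out), frameRegisterCount() == 10 would
// make this virtualRegisterForLocal(9).offset() == -10.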
int CodeBlock::stackPointerOffset()
{
    return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
}

size_t CodeBlock::predictedMachineCodeSize()
{
    VM* vm = m_vm;
    // This will be called from CodeBlock::CodeBlock before either m_vm or the
    // instructions have been initialized. It's OK to return 0 because what will really
    // matter is the recomputation of this value when the slow path is triggered.
    if (!vm)
        return 0;

    if (!*vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
        return 0; // It's as good of a prediction as we'll get.

    // Be conservative: return a size that will be an overestimation 84% of the time
    // (the mean plus one standard deviation covers roughly 84% of a normal distribution).
    double multiplier = vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() +
        vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation();
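    // Illustrative arithmetic: a mean of 10 machine-code bytes per bytecode word with a
    // standard deviation of 4 gives multiplier == 14, so a bytecodeCost() of 1000 would
    // predict roughly 14000 bytes.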

    // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
    // here is OK, since this whole method is just a heuristic.
    if (multiplier < 0 || multiplier > 1000)
        return 0;

    double doubleResult = multiplier * bytecodeCost();

    // Be even more paranoid: silently reject values that won't fit into a size_t. If
    // the function is so huge that we can't even fit it into virtual memory then we
    // should probably have some other guards in place to prevent us from even getting
    // to this point.
    if (doubleResult > std::numeric_limits<size_t>::max())
        return 0;

    return static_cast<size_t>(doubleResult);
}

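// Best-effort reverse mapping from a virtual register to a source-level name: scan any
// SymbolTable found among the constant registers for an entry whose VarOffset matches,
// then fall back to the special "this" and argument registers.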
String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
{
    for (auto& constantRegister : m_constantRegisters) {
        if (constantRegister.get().isEmpty())
            continue;
        if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(*vm(), constantRegister.get())) {
            ConcurrentJSLocker locker(symbolTable->m_lock);
            auto end = symbolTable->end(locker);
            for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) {
                if (ptr->value.varOffset() == VarOffset(virtualRegister)) {
                    // FIXME: This won't work from the compilation thread.
                    // https://bugs.webkit.org/show_bug.cgi?id=115300
                    return ptr->key.get();
                }
            }
        }
    }
    if (virtualRegister == thisRegister())
        return "this"_s;
    if (virtualRegister.isArgument())
        return makeString("arguments[", pad(' ', 3, virtualRegister.toArgument()), ']');

    return emptyString();
}

ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset)
{
    auto instruction = instructions().at(bytecodeOffset);
    switch (instruction->opcodeID()) {

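    // The CASE macro below is expanded once per opcode in FOR_EACH_OPCODE_WITH_VALUE_PROFILE.
    // Taking OpGetById as an illustrative member of that list, each expansion looks like:
    //     case OpGetById::opcodeID:
    //         return &instruction->as<OpGetById>().metadata(this).m_profile;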
#define CASE(Op) \
    case Op::opcodeID: \
        return &instruction->as<Op>().metadata(this).m_profile;

    FOR_EACH_OPCODE_WITH_VALUE_PROFILE(CASE)

#undef CASE

    default:
        return nullptr;

    }
}

SpeculatedType CodeBlock::valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
{
    if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeOffset(bytecodeOffset))
        return valueProfile->computeUpdatedPrediction(locker);
    return SpecNone;
}

ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset)
{
    return *tryGetValueProfileForBytecodeOffset(bytecodeOffset);
}

void CodeBlock::validate()
{
    BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.

    FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(this, 0);

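    // Nothing should be live at the head of the function, since no locals have been
    // initialized yet. The checks below therefore expect an all-zero bitvector that is
    // exactly m_numCalleeLocals bits wide.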
    if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) {
        beginValidationDidFail();
        dataLog("    Wrong number of bits in result!\n");
        dataLog("    Result: ", liveAtHead, "\n");
        dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
        endValidationDidFail();
    }

    for (unsigned i = m_numCalleeLocals; i--;) {
        VirtualRegister reg = virtualRegisterForLocal(i);

        if (liveAtHead[i]) {
            beginValidationDidFail();
            dataLog("    Variable ", reg, " is expected to be dead.\n");
            dataLog("    Result: ", liveAtHead, "\n");
            endValidationDidFail();
        }
    }

    const InstructionStream& instructionStream = instructions();
    for (const auto& instruction : instructionStream) {
        OpcodeID opcode = instruction->opcodeID();
        if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) {
            if (opcode == op_catch || opcode == op_enter) {
                // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
                // inside of a try block because they are responsible for bootstrapping state. And they
                // are never allowed to throw an exception because of this. We rely on this when compiling
                // in the DFG. Because an entrypoint never throws, the bytecode generator will never
                // allow one inside a try block.
                beginValidationDidFail();
                dataLog("    entrypoint not allowed inside a try block.");
                endValidationDidFail();
            }
        }
    }
}

void CodeBlock::beginValidationDidFail()
{
    dataLog("Validation failure in ", *this, ":\n");
    dataLog("\n");
}

void CodeBlock::endValidationDidFail()
{
    dataLog("\n");
    dumpBytecode();
    dataLog("\n");
    dataLog("Validation failure.\n");
    RELEASE_ASSERT_NOT_REACHED();
}

void CodeBlock::addBreakpoint(unsigned numBreakpoints)
{
    m_numBreakpoints += numBreakpoints;
    ASSERT(m_numBreakpoints);
    if (JITCode::isOptimizingJIT(jitType()))
        jettison(Profiler::JettisonDueToDebuggerBreakpoint);
}

void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
{
    m_steppingMode = mode;
    if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
        jettison(Profiler::JettisonDueToDebuggerStepping);
}

int CodeBlock::outOfLineJumpOffset(const Instruction* pc)
{
    int offset = bytecodeOffset(pc);
    return m_unlinkedCode->outOfLineJumpOffset(offset);
}

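// Out-of-line jump offsets are stored relative to the jump instruction itself, which is
// why the target below is instructions().at(offset + target) rather than at(target).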
const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc)
{
    int offset = bytecodeOffset(pc);
    int target = m_unlinkedCode->outOfLineJumpOffset(offset);
    return instructions().at(offset + target).ptr();
}

ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
{
    return arithProfileForPC(instructions().at(bytecodeOffset).ptr());
}

ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc)
{
    switch (pc->opcodeID()) {
    case op_negate:
        return &pc->as<OpNegate>().metadata(this).m_arithProfile;
    case op_add:
        return &pc->as<OpAdd>().metadata(this).m_arithProfile;
    case op_mul:
        return &pc->as<OpMul>().metadata(this).m_arithProfile;
    case op_sub:
        return &pc->as<OpSub>().metadata(this).m_arithProfile;
    case op_div:
        return &pc->as<OpDiv>().metadata(this).m_arithProfile;
    default:
        break;
    }

    return nullptr;
}

bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset)
{
    if (!hasBaselineJITProfiling())
        return false;
    ArithProfile* profile = arithProfileForBytecodeOffset(bytecodeOffset);
    if (!profile)
        return false;
    return profile->tookSpecialFastPath();
}

#if ENABLE(JIT)
DFG::CapabilityLevel CodeBlock::capabilityLevel()
{
    DFG::CapabilityLevel result = computeCapabilityLevel();
    m_capabilityLevelState = result;
    return result;
}
#endif

void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler()
{
    if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets())
        return;
    const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();
    for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) {
        // Because op_profile_control_flow is emitted at the beginning of every basic block, finding
        // the next op_profile_control_flow will give us the text range of a single basic block.
        size_t startIdx = bytecodeOffsets[i];
        auto instruction = instructions().at(startIdx);
        RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow);
        auto bytecode = instruction->as<OpProfileControlFlow>();
        auto& metadata = bytecode.metadata(this);
        int basicBlockStartOffset = bytecode.m_textOffset;
        int basicBlockEndOffset;
        if (i + 1 < offsetsLength) {
            size_t endIdx = bytecodeOffsets[i + 1];
            auto endInstruction = instructions().at(endIdx);
            RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow);
            basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().m_textOffset - 1;
        } else {
            basicBlockEndOffset = sourceOffset() + ownerExecutable()->source().length() - 1; // Offset before the closing brace.
            basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace, ensure it is the offset before.
        }

        // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more
        // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than
        // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node
        // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different
        // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript
        // program. The condition:
        // (basicBlockEndOffset < basicBlockStartOffset)
        // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic
        // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These
        // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same
        // internal data structure, so if any of them execute, it will record the same textual basic block in the
        // JavaScript program as executing.
        // At the bytecode level, this situation looks like:
        // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset)
        // ...
        // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m).
        // ...
        // m: op_profile_control_flow
        if (basicBlockEndOffset < basicBlockStartOffset) {
            RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock.
            metadata.m_basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();
            continue;
        }

        BasicBlockLocation* basicBlockLocation = vm()->controlFlowProfiler()->getBasicBlockLocation(ownerExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset);

        // Find all functions that are enclosed within the range [basicBlockStartOffset, basicBlockEndOffset]
        // and insert those functions' start/end offsets as gaps in the current BasicBlockLocation.
        // This is necessary because in the original source text of a JavaScript program,
        // function literals form new basic block boundaries, but they aren't represented
        // inside the CodeBlock's instruction stream.
        auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) {
            const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable();
            int functionStart = executable->typeProfilingStartOffset();
            int functionEnd = executable->typeProfilingEndOffset();
            if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset)
                basicBlockLocation->insertGap(functionStart, functionEnd);
        };

        for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls)
            insertFunctionGaps(executable);
        for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs)
            insertFunctionGaps(executable);

        metadata.m_basicBlockLocation = basicBlockLocation;
    }
}

#if ENABLE(JIT)
void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map)
{
    ConcurrentJSLocker locker(m_lock);
    ensureJITData(locker).m_pcToCodeOriginMap = WTFMove(map);
}

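// Map a machine PC back to a CodeOrigin, trying sources in order: the PC-to-CodeOrigin
// map if one was recorded, then any inline cache stubs whose code contains the PC, and
// finally the JIT code's own lookup.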
Optional<CodeOrigin> CodeBlock::findPC(void* pc)
{
    {
        ConcurrentJSLocker locker(m_lock);
        if (auto* jitData = m_jitData.get()) {
            if (jitData->m_pcToCodeOriginMap) {
                if (Optional<CodeOrigin> codeOrigin = jitData->m_pcToCodeOriginMap->findPC(pc))
                    return codeOrigin;
            }

            for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
                if (stubInfo->containsPC(pc))
                    return Optional<CodeOrigin>(stubInfo->codeOrigin);
            }
        }
    }

    if (Optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
        return codeOrigin;

    return WTF::nullopt;
}
#endif // ENABLE(JIT)

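// How a CallSiteIndex decodes depends on the tier that encoded it: LLInt/Baseline store
// the bytecode offset directly on JSVALUE64 platforms and an Instruction pointer on
// 32-bit ones, while the optimizing tiers go through the CodeOrigin table.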
Optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
{
    Optional<unsigned> bytecodeOffset;
    JITType jitType = this->jitType();
    if (jitType == JITType::InterpreterThunk || jitType == JITType::BaselineJIT) {
#if USE(JSVALUE64)
        bytecodeOffset = callSiteIndex.bits();
#else
        Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
        bytecodeOffset = this->bytecodeOffset(instruction);
#endif
    } else if (jitType == JITType::DFGJIT || jitType == JITType::FTLJIT) {
#if ENABLE(DFG_JIT)
        RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
        CodeOrigin origin = codeOrigin(callSiteIndex);
        bytecodeOffset = origin.bytecodeIndex();
#else
        RELEASE_ASSERT_NOT_REACHED();
#endif
    }

    return bytecodeOffset;
}

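// Scale a tier-up threshold by how optimization went for this code in the past. As a
// worked example with an illustrative base threshold of 500: code that never profited
// from optimization before (FalseTriState) gets 2000, code that did (TrueTriState) gets
// 250, and the mixed/unknown case keeps 500.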
int32_t CodeBlock::thresholdForJIT(int32_t threshold)
{
    switch (unlinkedCodeBlock()->didOptimize()) {
    case MixedTriState:
        return threshold;
    case FalseTriState:
        return threshold * 4;
    case TrueTriState:
        return threshold / 2;
    }
    ASSERT_NOT_REACHED();
    return threshold;
}

void CodeBlock::jitAfterWarmUp()
{
    m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this);
}

void CodeBlock::jitSoon()
{
    m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this);
}

bool CodeBlock::hasInstalledVMTrapBreakpoints() const
{
#if ENABLE(SIGNAL_BASED_VM_TRAPS)
    // This function may be called from a signal handler. We need to be
    // careful to not call anything that is not signal handler safe, e.g.
    // we should not perturb the refCount of m_jitCode.
    if (!JITCode::isOptimizingJIT(jitType()))
        return false;
    return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints();
#else
    return false;
#endif
}

bool CodeBlock::installVMTrapBreakpoints()
{
#if ENABLE(SIGNAL_BASED_VM_TRAPS)
    // This function may be called from a signal handler. We need to be
    // careful to not call anything that is not signal handler safe, e.g.
    // we should not perturb the refCount of m_jitCode.
    if (!JITCode::isOptimizingJIT(jitType()))
        return false;
    auto& commonData = *m_jitCode->dfgCommon();
    commonData.installVMTrapBreakpoints(this);
    return true;
#else
    UNREACHABLE_FOR_PLATFORM();
    return false;
#endif
}

void CodeBlock::dumpMathICStats()
{
#if ENABLE(MATH_IC_STATS)
    double numAdds = 0.0;
    double totalAddSize = 0.0;
    double numMuls = 0.0;
    double totalMulSize = 0.0;
    double numNegs = 0.0;
    double totalNegSize = 0.0;
    double numSubs = 0.0;
    double totalSubSize = 0.0;

    auto countICs = [&] (CodeBlock* codeBlock) {
        if (auto* jitData = codeBlock->m_jitData.get()) {
            for (JITAddIC* addIC : jitData->m_addICs) {
                numAdds++;
                totalAddSize += addIC->codeSize();
            }

            for (JITMulIC* mulIC : jitData->m_mulICs) {
                numMuls++;
                totalMulSize += mulIC->codeSize();
            }

            for (JITNegIC* negIC : jitData->m_negICs) {
                numNegs++;
                totalNegSize += negIC->codeSize();
            }

            for (JITSubIC* subIC : jitData->m_subICs) {
                numSubs++;
                totalSubSize += subIC->codeSize();
            }
        }
    };
    heap()->forEachCodeBlock(countICs);

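    // Note: with IEEE doubles, the averages below print NaN for any IC kind that has no
    // instances (0.0 / 0.0), which seems tolerable for a debug-only dump behind
    // ENABLE(MATH_IC_STATS).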
    dataLog("Num Adds: ", numAdds, "\n");
    dataLog("Total Add size in bytes: ", totalAddSize, "\n");
    dataLog("Average Add size: ", totalAddSize / numAdds, "\n");
    dataLog("\n");
    dataLog("Num Muls: ", numMuls, "\n");
    dataLog("Total Mul size in bytes: ", totalMulSize, "\n");
    dataLog("Average Mul size: ", totalMulSize / numMuls, "\n");
    dataLog("\n");
    dataLog("Num Negs: ", numNegs, "\n");
    dataLog("Total Neg size in bytes: ", totalNegSize, "\n");
    dataLog("Average Neg size: ", totalNegSize / numNegs, "\n");
    dataLog("\n");
    dataLog("Num Subs: ", numSubs, "\n");
    dataLog("Total Sub size in bytes: ", totalSubSize, "\n");
    dataLog("Average Sub size: ", totalSubSize / numSubs, "\n");

    dataLog("-----------------------\n");
#endif
}

void setPrinter(Printer::PrintRecord& record, CodeBlock* codeBlock)
{
    Printer::setPrinter(record, toCString(codeBlock));
}

} // namespace JSC

namespace WTF {

void printInternal(PrintStream& out, JSC::CodeBlock* codeBlock)
{
    if (UNLIKELY(!codeBlock)) {
        out.print("<null codeBlock>");
        return;
    }
    out.print(*codeBlock);
}

} // namespace WTF