1 | /* |
2 | * Copyright (C) 2008-2019 Apple Inc. All rights reserved. |
3 | * Copyright (C) 2008 Cameron Zwarich <[email protected]> |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
8 | * |
9 | * 1. Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * 2. Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
14 | * 3. Neither the name of Apple Inc. ("Apple") nor the names of |
15 | * its contributors may be used to endorse or promote products derived |
16 | * from this software without specific prior written permission. |
17 | * |
18 | * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY |
19 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
20 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
21 | * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY |
22 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
23 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
24 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
25 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
26 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
27 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
28 | */ |
29 | |
30 | #include "config.h" |
31 | #include "CodeBlock.h" |
32 | |
33 | #include "ArithProfile.h" |
34 | #include "BasicBlockLocation.h" |
35 | #include "BytecodeDumper.h" |
36 | #include "BytecodeGenerator.h" |
37 | #include "BytecodeLivenessAnalysis.h" |
38 | #include "BytecodeStructs.h" |
39 | #include "BytecodeUseDef.h" |
40 | #include "CallLinkStatus.h" |
41 | #include "CodeBlockInlines.h" |
42 | #include "CodeBlockSet.h" |
43 | #include "DFGCapabilities.h" |
44 | #include "DFGCommon.h" |
45 | #include "DFGDriver.h" |
46 | #include "DFGJITCode.h" |
47 | #include "DFGWorklist.h" |
48 | #include "Debugger.h" |
49 | #include "EvalCodeBlock.h" |
50 | #include "FullCodeOrigin.h" |
51 | #include "FunctionCodeBlock.h" |
52 | #include "FunctionExecutableDump.h" |
53 | #include "GetPutInfo.h" |
54 | #include "InlineCallFrame.h" |
55 | #include "Instruction.h" |
56 | #include "InstructionStream.h" |
57 | #include "InterpreterInlines.h" |
58 | #include "IsoCellSetInlines.h" |
59 | #include "JIT.h" |
60 | #include "JITMathIC.h" |
61 | #include "JSBigInt.h" |
62 | #include "JSCInlines.h" |
63 | #include "JSCJSValue.h" |
64 | #include "JSFunction.h" |
65 | #include "JSLexicalEnvironment.h" |
66 | #include "JSModuleEnvironment.h" |
67 | #include "JSSet.h" |
68 | #include "JSString.h" |
69 | #include "JSTemplateObjectDescriptor.h" |
70 | #include "LLIntData.h" |
71 | #include "LLIntEntrypoint.h" |
72 | #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h" |
73 | #include "LowLevelInterpreter.h" |
74 | #include "MetadataTable.h" |
75 | #include "ModuleProgramCodeBlock.h" |
76 | #include "ObjectAllocationProfileInlines.h" |
77 | #include "OpcodeInlines.h" |
78 | #include "PCToCodeOriginMap.h" |
79 | #include "PolymorphicAccess.h" |
80 | #include "ProfilerDatabase.h" |
81 | #include "ProgramCodeBlock.h" |
82 | #include "ReduceWhitespace.h" |
83 | #include "Repatch.h" |
84 | #include "SlotVisitorInlines.h" |
85 | #include "StackVisitor.h" |
86 | #include "StructureStubInfo.h" |
87 | #include "TypeLocationCache.h" |
88 | #include "TypeProfiler.h" |
89 | #include "VMInlines.h" |
90 | #include <wtf/BagToHashMap.h> |
91 | #include <wtf/CommaPrinter.h> |
92 | #include <wtf/Forward.h> |
93 | #include <wtf/SimpleStats.h> |
94 | #include <wtf/StringPrintStream.h> |
95 | #include <wtf/text/StringConcatenateNumbers.h> |
96 | #include <wtf/text/UniquedStringImpl.h> |
97 | |
98 | #if ENABLE(ASSEMBLER) |
99 | #include "RegisterAtOffsetList.h" |
100 | #endif |
101 | |
102 | #if ENABLE(DFG_JIT) |
103 | #include "DFGOperations.h" |
104 | #endif |
105 | |
106 | #if ENABLE(FTL_JIT) |
107 | #include "FTLJITCode.h" |
108 | #endif |
109 | |
110 | namespace JSC { |
111 | |
112 | const ClassInfo CodeBlock::s_info = { |
113 | "CodeBlock" , nullptr, nullptr, nullptr, |
114 | CREATE_METHOD_TABLE(CodeBlock) |
115 | }; |
116 | |
117 | CString CodeBlock::inferredName() const |
118 | { |
    switch (codeType()) {
    case GlobalCode:
        return "<global>";
    case EvalCode:
        return "<eval>";
    case FunctionCode:
        return jsCast<FunctionExecutable*>(ownerExecutable())->ecmaName().utf8();
    case ModuleCode:
        return "<module>";
    default:
        CRASH();
        return CString("", 0);
    }
132 | } |
133 | |
134 | bool CodeBlock::hasHash() const |
135 | { |
136 | return !!m_hash; |
137 | } |
138 | |
139 | bool CodeBlock::isSafeToComputeHash() const |
140 | { |
141 | return !isCompilationThread(); |
142 | } |
143 | |
144 | CodeBlockHash CodeBlock::hash() const |
145 | { |
146 | if (!m_hash) { |
147 | RELEASE_ASSERT(isSafeToComputeHash()); |
148 | m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind()); |
149 | } |
150 | return m_hash; |
151 | } |
152 | |
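// Reconstructs the function's source text for tooling. The unlinked executable stores offsets
// relative to its own start, so we shift them by the delta between the linked and unlinked start
// offsets before slicing the provider's source.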
153 | CString CodeBlock::sourceCodeForTools() const |
154 | { |
155 | if (codeType() != FunctionCode) |
156 | return ownerExecutable()->source().toUTF8(); |
157 | |
158 | SourceProvider* provider = source().provider(); |
159 | FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable()); |
160 | UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable(); |
161 | unsigned unlinkedStartOffset = unlinked->startOffset(); |
162 | unsigned linkedStartOffset = executable->source().startOffset(); |
163 | int delta = linkedStartOffset - unlinkedStartOffset; |
164 | unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart(); |
165 | unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength(); |
166 | return toCString( |
167 | "function " , |
168 | provider->source().substring(rangeStart, rangeEnd - rangeStart).utf8()); |
169 | } |
170 | |
171 | CString CodeBlock::sourceCodeOnOneLine() const |
172 | { |
173 | return reduceWhitespace(sourceCodeForTools()); |
174 | } |
175 | |
176 | CString CodeBlock::hashAsStringIfPossible() const |
177 | { |
178 | if (hasHash() || isSafeToComputeHash()) |
179 | return toCString(hash()); |
180 | return "<no-hash>" ; |
181 | } |
182 | |
183 | void CodeBlock::dumpAssumingJITType(PrintStream& out, JITType jitType) const |
184 | { |
    out.print(inferredName(), "#", hashAsStringIfPossible());
    out.print(":[", RawPointer(this), "->");
    if (!!m_alternative)
        out.print(RawPointer(alternative()), "->");
    out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());

    if (codeType() == FunctionCode)
        out.print(specializationKind());
    out.print(", ", instructionsSize());
    if (this->jitType() == JITType::BaselineJIT && m_shouldAlwaysBeInlined)
        out.print(" (ShouldAlwaysBeInlined)");
    if (ownerExecutable()->neverInline())
        out.print(" (NeverInline)");
    if (ownerExecutable()->neverOptimize())
        out.print(" (NeverOptimize)");
    else if (ownerExecutable()->neverFTLOptimize())
        out.print(" (NeverFTLOptimize)");
    if (ownerExecutable()->didTryToEnterInLoop())
        out.print(" (DidTryToEnterInLoop)");
    if (ownerExecutable()->isStrictMode())
        out.print(" (StrictMode)");
    if (m_didFailJITCompilation)
        out.print(" (JITFail)");
    if (this->jitType() == JITType::BaselineJIT && m_didFailFTLCompilation)
        out.print(" (FTLFail)");
    if (this->jitType() == JITType::BaselineJIT && m_hasBeenCompiledWithFTL)
        out.print(" (HadFTLReplacement)");
    out.print("]");
213 | } |
214 | |
215 | void CodeBlock::dump(PrintStream& out) const |
216 | { |
217 | dumpAssumingJITType(out, jitType()); |
218 | } |
219 | |
220 | void CodeBlock::dumpSource() |
221 | { |
222 | dumpSource(WTF::dataFile()); |
223 | } |
224 | |
225 | void CodeBlock::dumpSource(PrintStream& out) |
226 | { |
227 | ScriptExecutable* executable = ownerExecutable(); |
228 | if (executable->isFunctionExecutable()) { |
229 | FunctionExecutable* functionExecutable = reinterpret_cast<FunctionExecutable*>(executable); |
230 | StringView source = functionExecutable->source().provider()->getRange( |
231 | functionExecutable->parametersStartOffset(), |
232 | functionExecutable->typeProfilingEndOffset(vm()) + 1); // Type profiling end offset is the character before the '}'. |
233 | |
234 | out.print("function " , inferredName(), source); |
235 | return; |
236 | } |
237 | out.print(executable->source().view()); |
238 | } |
239 | |
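// Debugging helpers. A minimal usage sketch (assuming you already hold a CodeBlock* named block):
//     block->dumpBytecode();                // dump the whole block to WTF::dataFile()
//     block->dumpBytecode(WTF::dataFile()); // or pass the output stream explicitly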
240 | void CodeBlock::dumpBytecode() |
241 | { |
242 | dumpBytecode(WTF::dataFile()); |
243 | } |
244 | |
245 | void CodeBlock::dumpBytecode(PrintStream& out) |
246 | { |
247 | ICStatusMap statusMap; |
248 | getICStatusMap(statusMap); |
249 | CodeBlockBytecodeDumper<CodeBlock>::dumpBlock(this, instructions(), out, statusMap); |
250 | } |
251 | |
252 | void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap) |
253 | { |
254 | BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap); |
255 | } |
256 | |
257 | void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap) |
258 | { |
259 | const auto it = instructions().at(bytecodeOffset); |
260 | dumpBytecode(out, it, statusMap); |
261 | } |
262 | |
263 | namespace { |
264 | |
265 | class PutToScopeFireDetail : public FireDetail { |
266 | public: |
267 | PutToScopeFireDetail(CodeBlock* codeBlock, const Identifier& ident) |
268 | : m_codeBlock(codeBlock) |
269 | , m_ident(ident) |
270 | { |
271 | } |
272 | |
273 | void dump(PrintStream& out) const override |
274 | { |
275 | out.print("Linking put_to_scope in " , FunctionExecutableDump(jsCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())), " for " , m_ident); |
276 | } |
277 | |
278 | private: |
279 | CodeBlock* m_codeBlock; |
280 | const Identifier& m_ident; |
281 | }; |
282 | |
283 | } // anonymous namespace |
284 | |
285 | CodeBlock::CodeBlock(VM& vm, Structure* structure, CopyParsedBlockTag, CodeBlock& other) |
286 | : JSCell(vm, structure) |
287 | , m_globalObject(other.m_globalObject) |
288 | , m_shouldAlwaysBeInlined(true) |
289 | #if ENABLE(JIT) |
290 | , m_capabilityLevelState(DFG::CapabilityLevelNotSet) |
291 | #endif |
292 | , m_didFailJITCompilation(false) |
293 | , m_didFailFTLCompilation(false) |
294 | , m_hasBeenCompiledWithFTL(false) |
295 | , m_numCalleeLocals(other.m_numCalleeLocals) |
296 | , m_numVars(other.m_numVars) |
297 | , m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip) |
298 | , m_hasDebuggerStatement(false) |
299 | , m_steppingMode(SteppingModeDisabled) |
300 | , m_numBreakpoints(0) |
301 | , m_bytecodeCost(other.m_bytecodeCost) |
302 | , m_scopeRegister(other.m_scopeRegister) |
303 | , m_hash(other.m_hash) |
304 | , m_unlinkedCode(other.vm(), this, other.m_unlinkedCode.get()) |
305 | , m_ownerExecutable(other.vm(), this, other.m_ownerExecutable.get()) |
306 | , m_vm(other.m_vm) |
307 | , m_instructionsRawPointer(other.m_instructionsRawPointer) |
308 | , m_constantRegisters(other.m_constantRegisters) |
309 | , m_constantsSourceCodeRepresentation(other.m_constantsSourceCodeRepresentation) |
310 | , m_functionDecls(other.m_functionDecls) |
311 | , m_functionExprs(other.m_functionExprs) |
312 | , m_osrExitCounter(0) |
313 | , m_optimizationDelayCounter(0) |
314 | , m_reoptimizationRetryCounter(0) |
315 | , m_metadata(other.m_metadata) |
316 | , m_creationTime(MonotonicTime::now()) |
317 | { |
318 | ASSERT(heap()->isDeferred()); |
319 | ASSERT(m_scopeRegister.isLocal()); |
320 | |
321 | ASSERT(source().provider()); |
322 | setNumParameters(other.numParameters()); |
323 | |
324 | vm.heap.codeBlockSet().add(this); |
325 | } |
326 | |
327 | void CodeBlock::finishCreation(VM& vm, CopyParsedBlockTag, CodeBlock& other) |
328 | { |
329 | Base::finishCreation(vm); |
330 | finishCreationCommon(vm); |
331 | |
332 | optimizeAfterWarmUp(); |
333 | jitAfterWarmUp(); |
334 | |
335 | if (other.m_rareData) { |
336 | createRareDataIfNecessary(); |
337 | |
338 | m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers; |
339 | m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables; |
340 | m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables; |
341 | } |
342 | } |
343 | |
344 | CodeBlock::CodeBlock(VM& vm, Structure* structure, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope) |
345 | : JSCell(vm, structure) |
346 | , m_globalObject(vm, this, scope->globalObject(vm)) |
347 | , m_shouldAlwaysBeInlined(true) |
348 | #if ENABLE(JIT) |
349 | , m_capabilityLevelState(DFG::CapabilityLevelNotSet) |
350 | #endif |
351 | , m_didFailJITCompilation(false) |
352 | , m_didFailFTLCompilation(false) |
353 | , m_hasBeenCompiledWithFTL(false) |
354 | , m_numCalleeLocals(unlinkedCodeBlock->numCalleeLocals()) |
355 | , m_numVars(unlinkedCodeBlock->numVars()) |
356 | , m_hasDebuggerStatement(false) |
357 | , m_steppingMode(SteppingModeDisabled) |
358 | , m_numBreakpoints(0) |
359 | , m_scopeRegister(unlinkedCodeBlock->scopeRegister()) |
360 | , m_unlinkedCode(vm, this, unlinkedCodeBlock) |
361 | , m_ownerExecutable(vm, this, ownerExecutable) |
362 | , m_vm(&vm) |
363 | , m_instructionsRawPointer(unlinkedCodeBlock->instructions().rawPointer()) |
364 | , m_osrExitCounter(0) |
365 | , m_optimizationDelayCounter(0) |
366 | , m_reoptimizationRetryCounter(0) |
367 | , m_metadata(unlinkedCodeBlock->metadata().link()) |
368 | , m_creationTime(MonotonicTime::now()) |
369 | { |
370 | ASSERT(heap()->isDeferred()); |
371 | ASSERT(m_scopeRegister.isLocal()); |
372 | |
373 | ASSERT(source().provider()); |
374 | setNumParameters(unlinkedCodeBlock->numParameters()); |
375 | |
376 | vm.heap.codeBlockSet().add(this); |
377 | } |
378 | |
// The main purpose of this function is to generate linked bytecode from unlinked bytecode. Linking
// takes an abstract representation of bytecode and ties it to a GlobalObject and scope chain. For
// example, this process allows us to cache the depth of lexical environment reads that reach
382 | // outside of this CodeBlock's compilation unit. It also allows us to generate particular constants that |
383 | // we can't generate during unlinked bytecode generation. This process is not allowed to generate control |
384 | // flow or introduce new locals. The reason for this is we rely on liveness analysis to be the same for |
385 | // all the CodeBlocks of an UnlinkedCodeBlock. We rely on this fact by caching the liveness analysis |
386 | // inside UnlinkedCodeBlock. |
387 | bool CodeBlock::finishCreation(VM& vm, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, |
388 | JSScope* scope) |
389 | { |
390 | Base::finishCreation(vm); |
391 | finishCreationCommon(vm); |
392 | |
393 | auto throwScope = DECLARE_THROW_SCOPE(vm); |
394 | |
395 | if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes()) |
396 | vm.functionHasExecutedCache()->removeUnexecutedRange(ownerExecutable->sourceID(), ownerExecutable->typeProfilingStartOffset(vm), ownerExecutable->typeProfilingEndOffset(vm)); |
397 | |
398 | ScriptExecutable* topLevelExecutable = ownerExecutable->topLevelExecutable(); |
399 | setConstantRegisters(unlinkedCodeBlock->constantRegisters(), unlinkedCodeBlock->constantsSourceCodeRepresentation(), topLevelExecutable); |
400 | RETURN_IF_EXCEPTION(throwScope, false); |
401 | |
402 | // We already have the cloned symbol table for the module environment since we need to instantiate |
403 | // the module environments before linking the code block. We replace the stored symbol table with the already cloned one. |
404 | if (UnlinkedModuleProgramCodeBlock* unlinkedModuleProgramCodeBlock = jsDynamicCast<UnlinkedModuleProgramCodeBlock*>(vm, unlinkedCodeBlock)) { |
405 | SymbolTable* clonedSymbolTable = jsCast<ModuleProgramExecutable*>(ownerExecutable)->moduleEnvironmentSymbolTable(); |
406 | if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) { |
407 | ConcurrentJSLocker locker(clonedSymbolTable->m_lock); |
408 | clonedSymbolTable->prepareForTypeProfiling(locker); |
409 | } |
410 | replaceConstant(unlinkedModuleProgramCodeBlock->moduleEnvironmentSymbolTableConstantRegisterOffset(), clonedSymbolTable); |
411 | } |
412 | |
413 | bool shouldUpdateFunctionHasExecutedCache = m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes() || m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes(); |
414 | m_functionDecls = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionDecls()); |
415 | for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) { |
416 | UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i); |
417 | if (shouldUpdateFunctionHasExecutedCache) |
418 | vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset()); |
419 | m_functionDecls[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source())); |
420 | } |
421 | |
422 | m_functionExprs = RefCountedArray<WriteBarrier<FunctionExecutable>>(unlinkedCodeBlock->numberOfFunctionExprs()); |
423 | for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) { |
424 | UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i); |
425 | if (shouldUpdateFunctionHasExecutedCache) |
426 | vm.functionHasExecutedCache()->insertUnexecutedRange(ownerExecutable->sourceID(), unlinkedExecutable->typeProfilingStartOffset(), unlinkedExecutable->typeProfilingEndOffset()); |
427 | m_functionExprs[i].set(vm, this, unlinkedExecutable->link(vm, topLevelExecutable, ownerExecutable->source())); |
428 | } |
429 | |
430 | if (unlinkedCodeBlock->hasRareData()) { |
431 | createRareDataIfNecessary(); |
432 | |
433 | setConstantIdentifierSetRegisters(vm, unlinkedCodeBlock->constantIdentifierSets()); |
434 | RETURN_IF_EXCEPTION(throwScope, false); |
435 | |
436 | if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) { |
437 | m_rareData->m_exceptionHandlers.resizeToFit(count); |
438 | for (size_t i = 0; i < count; i++) { |
439 | const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i); |
440 | HandlerInfo& handler = m_rareData->m_exceptionHandlers[i]; |
441 | #if ENABLE(JIT) |
442 | auto instruction = instructions().at(unlinkedHandler.target); |
443 | MacroAssemblerCodePtr<BytecodePtrTag> codePtr; |
444 | if (instruction->isWide32()) |
445 | codePtr = LLInt::getWide32CodePtr<BytecodePtrTag>(op_catch); |
446 | else if (instruction->isWide16()) |
447 | codePtr = LLInt::getWide16CodePtr<BytecodePtrTag>(op_catch); |
448 | else |
449 | codePtr = LLInt::getCodePtr<BytecodePtrTag>(op_catch); |
450 | handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>())); |
451 | #else |
452 | handler.initialize(unlinkedHandler); |
453 | #endif |
454 | } |
455 | } |
456 | |
457 | if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) { |
458 | m_rareData->m_stringSwitchJumpTables.grow(count); |
459 | for (size_t i = 0; i < count; i++) { |
460 | UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin(); |
461 | UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end(); |
462 | for (; ptr != end; ++ptr) { |
463 | OffsetLocation offset; |
464 | offset.branchOffset = ptr->value.branchOffset; |
465 | m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset); |
466 | } |
467 | } |
468 | } |
469 | |
470 | if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) { |
471 | m_rareData->m_switchJumpTables.grow(count); |
472 | for (size_t i = 0; i < count; i++) { |
473 | UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i); |
474 | SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i]; |
475 | destTable.branchOffsets = sourceTable.branchOffsets; |
476 | destTable.min = sourceTable.min; |
477 | } |
478 | } |
479 | } |
480 | |
481 | // Bookkeep the strongly referenced module environments. |
482 | HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments; |
483 | |
484 | auto link_profile = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& /*metadata*/) { |
485 | m_numberOfNonArgumentValueProfiles++; |
486 | }; |
487 | |
488 | auto link_objectAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) { |
489 | metadata.m_objectAllocationProfile.initializeProfile(vm, m_globalObject.get(), this, m_globalObject->objectPrototype(), bytecode.m_inlineCapacity); |
490 | }; |
491 | |
492 | auto link_arrayAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) { |
493 | metadata.m_arrayAllocationProfile.initializeIndexingMode(bytecode.m_recommendedIndexingType); |
494 | }; |
495 | |
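// The macros below stamp out one switch case per opcode in the linking loop. INITIALIZE_METADATA
// constructs the opcode's Metadata in place from the instruction, and LINK_FIELD forwards each
// field named in a LINK(...) invocation to the matching link_* lambda above (e.g. link_profile).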
496 | #define LINK_FIELD(__field) \ |
497 | WTF_LAZY_JOIN(link_, __field)(instruction, bytecode, metadata); |
498 | |
499 | #define INITIALIZE_METADATA(__op) \ |
500 | auto bytecode = instruction->as<__op>(); \ |
501 | auto& metadata = bytecode.metadata(this); \ |
502 | new (&metadata) __op::Metadata { bytecode }; \ |
503 | |
504 | #define CASE(__op) case __op::opcodeID |
505 | |
506 | #define LINK(...) \ |
507 | CASE(WTF_LAZY_FIRST(__VA_ARGS__)): { \ |
508 | INITIALIZE_METADATA(WTF_LAZY_FIRST(__VA_ARGS__)) \ |
509 | WTF_LAZY_HAS_REST(__VA_ARGS__)({ \ |
510 | WTF_LAZY_FOR_EACH_TERM(LINK_FIELD, WTF_LAZY_REST_(__VA_ARGS__)) \ |
511 | }) \ |
512 | break; \ |
513 | } |
514 | |
515 | const InstructionStream& instructionStream = instructions(); |
516 | for (const auto& instruction : instructionStream) { |
517 | OpcodeID opcodeID = instruction->opcodeID(); |
518 | m_bytecodeCost += opcodeLengths[opcodeID]; |
519 | switch (opcodeID) { |
520 | LINK(OpHasIndexedProperty) |
521 | |
522 | LINK(OpCallVarargs, profile) |
523 | LINK(OpTailCallVarargs, profile) |
524 | LINK(OpTailCallForwardArguments, profile) |
525 | LINK(OpConstructVarargs, profile) |
526 | LINK(OpGetByVal, profile) |
527 | |
528 | LINK(OpGetDirectPname, profile) |
529 | LINK(OpGetByIdWithThis, profile) |
530 | LINK(OpTryGetById, profile) |
531 | LINK(OpGetByIdDirect, profile) |
532 | LINK(OpGetByValWithThis, profile) |
533 | LINK(OpGetFromArguments, profile) |
534 | LINK(OpToNumber, profile) |
535 | LINK(OpToNumeric, profile) |
536 | LINK(OpToObject, profile) |
537 | LINK(OpGetArgument, profile) |
538 | LINK(OpGetInternalField, profile) |
539 | LINK(OpToThis, profile) |
540 | LINK(OpBitand, profile) |
541 | LINK(OpBitor, profile) |
542 | LINK(OpBitnot, profile) |
543 | LINK(OpBitxor, profile) |
544 | LINK(OpLshift, profile) |
545 | LINK(OpRshift, profile) |
546 | |
547 | LINK(OpGetById, profile) |
548 | |
549 | LINK(OpCall, profile) |
550 | LINK(OpTailCall, profile) |
551 | LINK(OpCallEval, profile) |
552 | LINK(OpConstruct, profile) |
553 | |
554 | LINK(OpInByVal) |
555 | LINK(OpPutByVal) |
556 | LINK(OpPutByValDirect) |
557 | |
558 | LINK(OpNewArray) |
559 | LINK(OpNewArrayWithSize) |
560 | LINK(OpNewArrayBuffer, arrayAllocationProfile) |
561 | |
562 | LINK(OpNewObject, objectAllocationProfile) |
563 | |
564 | LINK(OpPutById) |
565 | LINK(OpCreateThis) |
566 | LINK(OpCreatePromise) |
567 | LINK(OpCreateGenerator) |
568 | |
569 | LINK(OpAdd) |
570 | LINK(OpMul) |
571 | LINK(OpDiv) |
572 | LINK(OpSub) |
573 | |
574 | LINK(OpNegate) |
575 | LINK(OpInc) |
576 | LINK(OpDec) |
577 | |
578 | LINK(OpJneqPtr) |
579 | |
580 | LINK(OpCatch) |
581 | LINK(OpProfileControlFlow) |
582 | |
583 | case op_resolve_scope: { |
584 | INITIALIZE_METADATA(OpResolveScope) |
585 | |
586 | const Identifier& ident = identifier(bytecode.m_var); |
587 | RELEASE_ASSERT(bytecode.m_resolveType != LocalClosureVar); |
588 | |
589 | ResolveOp op = JSScope::abstractResolve(m_globalObject.get(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization); |
590 | RETURN_IF_EXCEPTION(throwScope, false); |
591 | |
592 | metadata.m_resolveType = op.type; |
593 | metadata.m_localScopeDepth = op.depth; |
594 | if (op.lexicalEnvironment) { |
595 | if (op.type == ModuleVar) { |
596 | // Keep the linked module environment strongly referenced. |
597 | if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry) |
598 | addConstant(ConcurrentJSLocker(m_lock), op.lexicalEnvironment); |
599 | metadata.m_lexicalEnvironment.set(vm, this, op.lexicalEnvironment); |
600 | } else |
601 | metadata.m_symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable()); |
602 | } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) { |
603 | metadata.m_constantScope.set(vm, this, constantScope); |
604 | if (op.type == GlobalProperty || op.type == GlobalPropertyWithVarInjectionChecks) |
605 | metadata.m_globalLexicalBindingEpoch = m_globalObject->globalLexicalBindingEpoch(); |
606 | } else |
607 | metadata.m_globalObject.clear(); |
608 | break; |
609 | } |
610 | |
611 | case op_get_from_scope: { |
612 | INITIALIZE_METADATA(OpGetFromScope) |
613 | |
614 | link_profile(instruction, bytecode, metadata); |
615 | metadata.m_watchpointSet = nullptr; |
616 | |
617 | ASSERT(!isInitialization(bytecode.m_getPutInfo.initializationMode())); |
618 | if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) { |
619 | metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode()); |
620 | break; |
621 | } |
622 | |
623 | const Identifier& ident = identifier(bytecode.m_var); |
624 | ResolveOp op = JSScope::abstractResolve(m_globalObject.get(), bytecode.m_localScopeDepth, scope, ident, Get, bytecode.m_getPutInfo.resolveType(), InitializationMode::NotInitialization); |
625 | RETURN_IF_EXCEPTION(throwScope, false); |
626 | |
627 | metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode()); |
628 | if (op.type == ModuleVar) |
629 | metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), ClosureVar, bytecode.m_getPutInfo.initializationMode()); |
630 | if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks) |
631 | metadata.m_watchpointSet = op.watchpointSet; |
632 | else if (op.structure) |
633 | metadata.m_structure.set(vm, this, op.structure); |
634 | metadata.m_operand = op.operand; |
635 | break; |
636 | } |
637 | |
638 | case op_put_to_scope: { |
639 | INITIALIZE_METADATA(OpPutToScope) |
640 | |
641 | if (bytecode.m_getPutInfo.resolveType() == LocalClosureVar) { |
642 | // Only do watching if the property we're putting to is not anonymous. |
643 | if (bytecode.m_var != UINT_MAX) { |
644 | SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.m_symbolTableOrScopeDepth.symbolTable().offset())); |
645 | const Identifier& ident = identifier(bytecode.m_var); |
646 | ConcurrentJSLocker locker(symbolTable->m_lock); |
647 | auto iter = symbolTable->find(locker, ident.impl()); |
648 | ASSERT(iter != symbolTable->end(locker)); |
649 | iter->value.prepareToWatch(); |
650 | metadata.m_watchpointSet = iter->value.watchpointSet(); |
651 | } else |
652 | metadata.m_watchpointSet = nullptr; |
653 | break; |
654 | } |
655 | |
656 | const Identifier& ident = identifier(bytecode.m_var); |
657 | metadata.m_watchpointSet = nullptr; |
658 | ResolveOp op = JSScope::abstractResolve(m_globalObject.get(), bytecode.m_symbolTableOrScopeDepth.scopeDepth(), scope, ident, Put, bytecode.m_getPutInfo.resolveType(), bytecode.m_getPutInfo.initializationMode()); |
659 | RETURN_IF_EXCEPTION(throwScope, false); |
660 | |
661 | metadata.m_getPutInfo = GetPutInfo(bytecode.m_getPutInfo.resolveMode(), op.type, bytecode.m_getPutInfo.initializationMode()); |
662 | if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks) |
663 | metadata.m_watchpointSet = op.watchpointSet; |
664 | else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) { |
665 | if (op.watchpointSet) |
666 | op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident)); |
667 | } else if (op.structure) |
668 | metadata.m_structure.set(vm, this, op.structure); |
669 | metadata.m_operand = op.operand; |
670 | break; |
671 | } |
672 | |
673 | case op_profile_type: { |
674 | RELEASE_ASSERT(m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()); |
675 | |
676 | INITIALIZE_METADATA(OpProfileType) |
677 | |
678 | size_t instructionOffset = instruction.offset() + instruction->size() - 1; |
679 | unsigned divotStart, divotEnd; |
680 | GlobalVariableID globalVariableID = 0; |
681 | RefPtr<TypeSet> globalTypeSet; |
682 | bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd); |
683 | SymbolTable* symbolTable = nullptr; |
684 | |
685 | switch (bytecode.m_flag) { |
686 | case ProfileTypeBytecodeClosureVar: { |
687 | const Identifier& ident = identifier(bytecode.m_identifier); |
688 | unsigned localScopeDepth = bytecode.m_symbolTableOrScopeDepth.scopeDepth(); |
689 | // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because |
690 | // we're abstractly "read"ing from a JSScope. |
691 | ResolveOp op = JSScope::abstractResolve(m_globalObject.get(), localScopeDepth, scope, ident, Get, bytecode.m_resolveType, InitializationMode::NotInitialization); |
692 | RETURN_IF_EXCEPTION(throwScope, false); |
693 | |
694 | if (op.type == ClosureVar || op.type == ModuleVar) |
695 | symbolTable = op.lexicalEnvironment->symbolTable(); |
696 | else if (op.type == GlobalVar) |
697 | symbolTable = m_globalObject.get()->symbolTable(); |
698 | |
699 | UniquedStringImpl* impl = (op.type == ModuleVar) ? op.importedName.get() : ident.impl(); |
700 | if (symbolTable) { |
701 | ConcurrentJSLocker locker(symbolTable->m_lock); |
702 | // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet. |
703 | symbolTable->prepareForTypeProfiling(locker); |
704 | globalVariableID = symbolTable->uniqueIDForVariable(locker, impl, vm); |
705 | globalTypeSet = symbolTable->globalTypeSetForVariable(locker, impl, vm); |
706 | } else |
707 | globalVariableID = TypeProfilerNoGlobalIDExists; |
708 | |
709 | break; |
710 | } |
711 | case ProfileTypeBytecodeLocallyResolved: { |
712 | int symbolTableIndex = bytecode.m_symbolTableOrScopeDepth.symbolTable().offset(); |
713 | SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex)); |
714 | const Identifier& ident = identifier(bytecode.m_identifier); |
715 | ConcurrentJSLocker locker(symbolTable->m_lock); |
716 | // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet. |
717 | globalVariableID = symbolTable->uniqueIDForVariable(locker, ident.impl(), vm); |
718 | globalTypeSet = symbolTable->globalTypeSetForVariable(locker, ident.impl(), vm); |
719 | |
720 | break; |
721 | } |
722 | case ProfileTypeBytecodeDoesNotHaveGlobalID: |
723 | case ProfileTypeBytecodeFunctionArgument: { |
724 | globalVariableID = TypeProfilerNoGlobalIDExists; |
725 | break; |
726 | } |
727 | case ProfileTypeBytecodeFunctionReturnStatement: { |
728 | RELEASE_ASSERT(ownerExecutable->isFunctionExecutable()); |
729 | globalTypeSet = jsCast<FunctionExecutable*>(ownerExecutable)->returnStatementTypeSet(); |
730 | globalVariableID = TypeProfilerReturnStatement; |
731 | if (!shouldAnalyze) { |
732 | // Because a return statement can be added implicitly to return undefined at the end of a function, |
733 | // and these nodes don't emit expression ranges because they aren't in the actual source text of |
734 | // the user's program, give the type profiler some range to identify these return statements. |
735 | // Currently, the text offset that is used as identification is "f" in the function keyword |
736 | // and is stored on TypeLocation's m_divotForFunctionOffsetIfReturnStatement member variable. |
737 | divotStart = divotEnd = ownerExecutable->typeProfilingStartOffset(vm); |
738 | shouldAnalyze = true; |
739 | } |
740 | break; |
741 | } |
742 | } |
743 | |
744 | std::pair<TypeLocation*, bool> locationPair = vm.typeProfiler()->typeLocationCache()->getTypeLocation(globalVariableID, |
745 | ownerExecutable->sourceID(), divotStart, divotEnd, WTFMove(globalTypeSet), &vm); |
746 | TypeLocation* location = locationPair.first; |
747 | bool isNewLocation = locationPair.second; |
748 | |
749 | if (bytecode.m_flag == ProfileTypeBytecodeFunctionReturnStatement) |
750 | location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(vm); |
751 | |
752 | if (shouldAnalyze && isNewLocation) |
753 | vm.typeProfiler()->insertNewLocation(location); |
754 | |
755 | metadata.m_typeLocation = location; |
756 | break; |
757 | } |
758 | |
759 | case op_debug: { |
760 | if (instruction->as<OpDebug>().m_debugHookType == DidReachBreakpoint) |
761 | m_hasDebuggerStatement = true; |
762 | break; |
763 | } |
764 | |
765 | case op_create_rest: { |
766 | int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().m_numParametersToSkip; |
767 | ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0); |
        // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT.
769 | m_numberOfArgumentsToSkip = numberOfArgumentsToSkip; |
770 | break; |
771 | } |
772 | |
773 | default: |
774 | break; |
775 | } |
776 | } |
777 | |
778 | #undef CASE |
779 | #undef INITIALIZE_METADATA |
780 | #undef LINK_FIELD |
781 | #undef LINK |
782 | |
783 | if (m_unlinkedCode->wasCompiledWithControlFlowProfilerOpcodes()) |
784 | insertBasicBlockBoundariesForControlFlowProfiler(); |
785 | |
    // Set optimization thresholds only after the instruction stream is initialized, since these
    // rely on the instruction count (and are in theory permitted to also inspect the
    // instruction stream to more accurately assess the cost of tier-up).
789 | optimizeAfterWarmUp(); |
790 | jitAfterWarmUp(); |
791 | |
792 | // If the concurrent thread will want the code block's hash, then compute it here |
793 | // synchronously. |
794 | if (Options::alwaysComputeHash()) |
795 | hash(); |
796 | |
797 | if (Options::dumpGeneratedBytecodes()) |
798 | dumpBytecode(); |
799 | |
800 | if (m_metadata) |
801 | vm.heap.reportExtraMemoryAllocated(m_metadata->sizeInBytes()); |
802 | |
803 | return true; |
804 | } |
805 | |
806 | void CodeBlock::finishCreationCommon(VM& vm) |
807 | { |
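    // Every CodeBlock is paired with an ExecutableToCodeBlockEdge cell; the GC traces from the
    // owner executable to this CodeBlock through that edge.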
808 | m_ownerEdge.set(vm, this, ExecutableToCodeBlockEdge::create(vm, this)); |
809 | } |
810 | |
811 | CodeBlock::~CodeBlock() |
812 | { |
813 | VM& vm = *m_vm; |
814 | |
815 | #if ENABLE(DFG_JIT) |
816 | // The JITCode (and its corresponding DFG::CommonData) may outlive the CodeBlock by |
817 | // a short amount of time after the CodeBlock is destructed. For example, the |
818 | // Interpreter::execute methods will ref JITCode before invoking it. This can |
819 | // result in the JITCode having a non-zero refCount when its owner CodeBlock is |
820 | // destructed. |
821 | // |
822 | // Hence, we cannot rely on DFG::CommonData destruction to clear these now invalid |
823 | // watchpoints in a timely manner. We'll ensure they are cleared here eagerly. |
824 | // |
825 | // We only need to do this for a DFG/FTL CodeBlock because only these will have a |
    // DFG::CommonData. Hence, the LLInt and Baseline will not have any of these watchpoints.
827 | // |
828 | // Note also that the LLIntPrototypeLoadAdaptiveStructureWatchpoint is also related |
829 | // to the CodeBlock. However, its lifecycle is tied directly to the CodeBlock, and |
830 | // will be automatically cleared when the CodeBlock destructs. |
831 | |
832 | if (JITCode::isOptimizingJIT(jitType())) |
833 | jitCode()->dfgCommon()->clearWatchpoints(); |
834 | #endif |
835 | vm.heap.codeBlockSet().remove(this); |
836 | |
837 | if (UNLIKELY(vm.m_perBytecodeProfiler)) |
838 | vm.m_perBytecodeProfiler->notifyDestruction(this); |
839 | |
840 | if (!vm.heap.isShuttingDown() && unlinkedCodeBlock()->didOptimize() == MixedTriState) |
841 | unlinkedCodeBlock()->setDidOptimize(FalseTriState); |
842 | |
843 | #if ENABLE(VERBOSE_VALUE_PROFILE) |
844 | dumpValueProfiles(); |
845 | #endif |
846 | |
847 | // We may be destroyed before any CodeBlocks that refer to us are destroyed. |
848 | // Consider that two CodeBlocks become unreachable at the same time. There |
849 | // is no guarantee about the order in which the CodeBlocks are destroyed. |
850 | // So, if we don't remove incoming calls, and get destroyed before the |
851 | // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's |
852 | // destructor will try to remove nodes from our (no longer valid) linked list. |
853 | unlinkIncomingCalls(); |
854 | |
855 | // Note that our outgoing calls will be removed from other CodeBlocks' |
856 | // m_incomingCalls linked lists through the execution of the ~CallLinkInfo |
857 | // destructors. |
858 | |
859 | #if ENABLE(JIT) |
860 | if (auto* jitData = m_jitData.get()) { |
861 | for (StructureStubInfo* stubInfo : jitData->m_stubInfos) { |
862 | stubInfo->aboutToDie(); |
863 | stubInfo->deref(); |
864 | } |
865 | } |
866 | #endif // ENABLE(JIT) |
867 | } |
868 | |
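// Materializes each unlinked constant identifier set as a JSSet of owned strings and stores it in
// the corresponding constant register, so bytecode that needs a fixed collection of property names
// can consult a ready-made set at run time.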
869 | void CodeBlock::setConstantIdentifierSetRegisters(VM& vm, const Vector<ConstantIdentifierSetEntry>& constants) |
870 | { |
871 | auto scope = DECLARE_THROW_SCOPE(vm); |
872 | JSGlobalObject* globalObject = m_globalObject.get(); |
873 | |
874 | for (const auto& entry : constants) { |
875 | const IdentifierSet& set = entry.first; |
876 | |
877 | Structure* setStructure = globalObject->setStructure(); |
878 | RETURN_IF_EXCEPTION(scope, void()); |
879 | JSSet* jsSet = JSSet::create(globalObject, vm, setStructure, set.size()); |
880 | RETURN_IF_EXCEPTION(scope, void()); |
881 | |
882 | for (const auto& setEntry : set) { |
883 | JSString* jsString = jsOwnedString(vm, setEntry.get()); |
884 | jsSet->add(globalObject, jsString); |
885 | RETURN_IF_EXCEPTION(scope, void()); |
886 | } |
887 | m_constantRegisters[entry.second].set(vm, this, jsSet); |
888 | } |
889 | } |
890 | |
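// Copies the unlinked constant pool into this CodeBlock: link-time constants are resolved against
// the global object, symbol tables are cloned (so each linked CodeBlock owns its scope-part state),
// and template object descriptors are materialized via the top-level executable.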
891 | void CodeBlock::setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation, ScriptExecutable* topLevelExecutable) |
892 | { |
893 | VM& vm = *m_vm; |
894 | auto scope = DECLARE_THROW_SCOPE(vm); |
895 | JSGlobalObject* globalObject = m_globalObject.get(); |
896 | |
897 | ASSERT(constants.size() == constantsSourceCodeRepresentation.size()); |
898 | size_t count = constants.size(); |
899 | { |
900 | ConcurrentJSLocker locker(m_lock); |
901 | m_constantRegisters.resizeToFit(count); |
902 | } |
903 | for (size_t i = 0; i < count; i++) { |
904 | JSValue constant = constants[i].get(); |
905 | switch (constantsSourceCodeRepresentation[i]) { |
906 | case SourceCodeRepresentation::LinkTimeConstant: |
907 | constant = globalObject->linkTimeConstant(static_cast<LinkTimeConstant>(constant.asInt32AsAnyInt())); |
908 | break; |
909 | case SourceCodeRepresentation::Other: |
910 | case SourceCodeRepresentation::Integer: |
911 | case SourceCodeRepresentation::Double: |
912 | if (!constant.isEmpty()) { |
913 | if (constant.isCell()) { |
914 | JSCell* cell = constant.asCell(); |
915 | if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm, cell)) { |
916 | if (m_unlinkedCode->wasCompiledWithTypeProfilerOpcodes()) { |
917 | ConcurrentJSLocker locker(symbolTable->m_lock); |
918 | symbolTable->prepareForTypeProfiling(locker); |
919 | } |
920 | |
921 | SymbolTable* clone = symbolTable->cloneScopePart(vm); |
922 | if (wasCompiledWithDebuggingOpcodes()) |
923 | clone->setRareDataCodeBlock(this); |
924 | |
925 | constant = clone; |
926 | } else if (auto* descriptor = jsDynamicCast<JSTemplateObjectDescriptor*>(vm, cell)) { |
927 | auto* templateObject = topLevelExecutable->createTemplateObject(globalObject, descriptor); |
928 | RETURN_IF_EXCEPTION(scope, void()); |
929 | constant = templateObject; |
930 | } |
931 | } |
932 | } |
933 | break; |
934 | } |
935 | m_constantRegisters[i].set(vm, this, constant); |
936 | } |
937 | |
938 | m_constantsSourceCodeRepresentation = constantsSourceCodeRepresentation; |
939 | } |
940 | |
941 | void CodeBlock::setAlternative(VM& vm, CodeBlock* alternative) |
942 | { |
943 | RELEASE_ASSERT(alternative); |
944 | RELEASE_ASSERT(alternative->jitCode()); |
945 | m_alternative.set(vm, this, alternative); |
946 | } |
947 | |
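// Argument value profiles only matter to the JIT tiers, so we skip allocating them entirely when
// the JIT is unavailable.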
948 | void CodeBlock::setNumParameters(int newValue) |
949 | { |
950 | m_numParameters = newValue; |
951 | |
952 | m_argumentValueProfiles = RefCountedArray<ValueProfile>(vm().canUseJIT() ? newValue : 0); |
953 | } |
954 | |
955 | CodeBlock* CodeBlock::specialOSREntryBlockOrNull() |
956 | { |
957 | #if ENABLE(FTL_JIT) |
958 | if (jitType() != JITType::DFGJIT) |
        return nullptr;
960 | DFG::JITCode* jitCode = m_jitCode->dfg(); |
961 | return jitCode->osrEntryBlock(); |
962 | #else // ENABLE(FTL_JIT) |
    return nullptr;
964 | #endif // ENABLE(FTL_JIT) |
965 | } |
966 | |
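// The GC-visible size of a CodeBlock is the cell itself plus the metadata table and any unshared
// JIT code, mirroring the extra memory reported in visitChildren().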
967 | size_t CodeBlock::estimatedSize(JSCell* cell, VM& vm) |
968 | { |
969 | CodeBlock* thisObject = jsCast<CodeBlock*>(cell); |
    size_t extraMemoryAllocated = 0;
971 | if (thisObject->m_metadata) |
972 | extraMemoryAllocated += thisObject->m_metadata->sizeInBytes(); |
973 | RefPtr<JITCode> jitCode = thisObject->m_jitCode; |
974 | if (jitCode && !jitCode->isShared()) |
975 | extraMemoryAllocated += jitCode->size(); |
976 | return Base::estimatedSize(cell, vm) + extraMemoryAllocated; |
977 | } |
978 | |
979 | void CodeBlock::visitChildren(JSCell* cell, SlotVisitor& visitor) |
980 | { |
981 | CodeBlock* thisObject = jsCast<CodeBlock*>(cell); |
982 | ASSERT_GC_OBJECT_INHERITS(thisObject, info()); |
983 | Base::visitChildren(cell, visitor); |
984 | visitor.append(thisObject->m_ownerEdge); |
985 | thisObject->visitChildren(visitor); |
986 | } |
987 | |
988 | void CodeBlock::visitChildren(SlotVisitor& visitor) |
989 | { |
990 | ConcurrentJSLocker locker(m_lock); |
991 | if (CodeBlock* otherBlock = specialOSREntryBlockOrNull()) |
992 | visitor.appendUnbarriered(otherBlock); |
993 | |
    size_t extraMemory = 0;
995 | if (m_metadata) |
996 | extraMemory += m_metadata->sizeInBytes(); |
997 | if (m_jitCode && !m_jitCode->isShared()) |
998 | extraMemory += m_jitCode->size(); |
999 | visitor.reportExtraMemoryVisited(extraMemory); |
1000 | |
1001 | stronglyVisitStrongReferences(locker, visitor); |
1002 | stronglyVisitWeakReferences(locker, visitor); |
1003 | |
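    // Register this CodeBlock in its subspace's cell set so it can be revisited after marking
    // (presumably this is how finalizeUnconditionally() finds the CodeBlocks it must sweep).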
1004 | VM::SpaceAndSet::setFor(*subspace()).add(this); |
1005 | } |
1006 | |
1007 | bool CodeBlock::shouldVisitStrongly(const ConcurrentJSLocker& locker) |
1008 | { |
1009 | if (Options::forceCodeBlockLiveness()) |
1010 | return true; |
1011 | |
1012 | if (shouldJettisonDueToOldAge(locker)) |
1013 | return false; |
1014 | |
1015 | // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when |
    // their weak references go stale. So if a baseline JIT CodeBlock gets
    // scanned, we can assume that means it's live.
1018 | if (!JITCode::isOptimizingJIT(jitType())) |
1019 | return true; |
1020 | |
1021 | return false; |
1022 | } |
1023 | |
1024 | bool CodeBlock::shouldJettisonDueToWeakReference(VM& vm) |
1025 | { |
1026 | if (!JITCode::isOptimizingJIT(jitType())) |
1027 | return false; |
1028 | return !vm.heap.isMarked(this); |
1029 | } |
1030 | |
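// How long a CodeBlock of a given tier may go unmarked before shouldJettisonDueToOldAge() treats it
// as stale; higher tiers get longer lifetimes, presumably because they are costlier to recompile.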
1031 | static Seconds timeToLive(JITType jitType) |
1032 | { |
1033 | if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) { |
1034 | switch (jitType) { |
1035 | case JITType::InterpreterThunk: |
1036 | return 10_ms; |
1037 | case JITType::BaselineJIT: |
1038 | return 30_ms; |
1039 | case JITType::DFGJIT: |
1040 | return 40_ms; |
1041 | case JITType::FTLJIT: |
1042 | return 120_ms; |
1043 | default: |
1044 | return Seconds::infinity(); |
1045 | } |
1046 | } |
1047 | |
1048 | switch (jitType) { |
1049 | case JITType::InterpreterThunk: |
1050 | return 5_s; |
1051 | case JITType::BaselineJIT: |
1052 | // Effectively 10 additional seconds, since BaselineJIT and |
1053 | // InterpreterThunk share a CodeBlock. |
1054 | return 15_s; |
1055 | case JITType::DFGJIT: |
1056 | return 20_s; |
1057 | case JITType::FTLJIT: |
1058 | return 60_s; |
1059 | default: |
1060 | return Seconds::infinity(); |
1061 | } |
1062 | } |
1063 | |
1064 | bool CodeBlock::shouldJettisonDueToOldAge(const ConcurrentJSLocker&) |
1065 | { |
1066 | if (m_vm->heap.isMarked(this)) |
1067 | return false; |
1068 | |
1069 | if (UNLIKELY(Options::forceCodeBlockToJettisonDueToOldAge())) |
1070 | return true; |
1071 | |
1072 | if (timeSinceCreation() < timeToLive(jitType())) |
1073 | return false; |
1074 | |
1075 | return true; |
1076 | } |
1077 | |
1078 | #if ENABLE(DFG_JIT) |
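// A recorded structure transition is only worth marking when its source structure is still live
// (and, for inlined code, its code origin too); propagateTransitions() then marks the target.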
1079 | static bool shouldMarkTransition(VM& vm, DFG::WeakReferenceTransition& transition) |
1080 | { |
1081 | if (transition.m_codeOrigin && !vm.heap.isMarked(transition.m_codeOrigin.get())) |
1082 | return false; |
1083 | |
1084 | if (!vm.heap.isMarked(transition.m_from.get())) |
1085 | return false; |
1086 | |
1087 | return true; |
1088 | } |
1089 | #endif // ENABLE(DFG_JIT) |
1090 | |
1091 | void CodeBlock::propagateTransitions(const ConcurrentJSLocker&, SlotVisitor& visitor) |
1092 | { |
1093 | UNUSED_PARAM(visitor); |
1094 | |
1095 | VM& vm = *m_vm; |
1096 | |
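    // For LLInt code the only cached transitions live in op_put_by_id metadata: when the old
    // structure is marked, keep the new structure alive so the cached transition stays usable.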
1097 | if (jitType() == JITType::InterpreterThunk) { |
1098 | if (m_metadata) { |
1099 | m_metadata->forEach<OpPutById>([&] (auto& metadata) { |
1100 | StructureID oldStructureID = metadata.m_oldStructureID; |
1101 | StructureID newStructureID = metadata.m_newStructureID; |
1102 | if (!oldStructureID || !newStructureID) |
1103 | return; |
1104 | Structure* oldStructure = |
1105 | vm.heap.structureIDTable().get(oldStructureID); |
1106 | Structure* newStructure = |
1107 | vm.heap.structureIDTable().get(newStructureID); |
1108 | if (vm.heap.isMarked(oldStructure)) |
1109 | visitor.appendUnbarriered(newStructure); |
1110 | }); |
1111 | } |
1112 | } |
1113 | |
1114 | #if ENABLE(JIT) |
1115 | if (JITCode::isJIT(jitType())) { |
1116 | if (auto* jitData = m_jitData.get()) { |
1117 | for (StructureStubInfo* stubInfo : jitData->m_stubInfos) |
1118 | stubInfo->propagateTransitions(visitor); |
1119 | } |
1120 | } |
1121 | #endif // ENABLE(JIT) |
1122 | |
1123 | #if ENABLE(DFG_JIT) |
1124 | if (JITCode::isOptimizingJIT(jitType())) { |
1125 | DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); |
1126 | |
1127 | dfgCommon->recordedStatuses.markIfCheap(visitor); |
1128 | |
1129 | for (auto& weakReference : dfgCommon->weakStructureReferences) |
1130 | weakReference->markIfCheap(visitor); |
1131 | |
1132 | for (auto& transition : dfgCommon->transitions) { |
1133 | if (shouldMarkTransition(vm, transition)) { |
1134 | // If the following three things are live, then the target of the |
1135 | // transition is also live: |
1136 | // |
1137 | // - This code block. We know it's live already because otherwise |
1138 | // we wouldn't be scanning ourselves. |
1139 | // |
1140 | // - The code origin of the transition. Transitions may arise from |
1141 | // code that was inlined. They are not relevant if the user's |
1142 | // object that is required for the inlinee to run is no longer |
1143 | // live. |
1144 | // |
1145 | // - The source of the transition. The transition checks if some |
1146 | // heap location holds the source, and if so, stores the target. |
1147 | // Hence the source must be live for the transition to be live. |
1148 | // |
1149 | // We also short-circuit the liveness if the structure is harmless |
1150 | // to mark (i.e. its global object and prototype are both already |
1151 | // live). |
1152 | |
1153 | visitor.append(transition.m_to); |
1154 | } |
1155 | } |
1156 | } |
1157 | #endif // ENABLE(DFG_JIT) |
1158 | } |
1159 | |
1160 | void CodeBlock::determineLiveness(const ConcurrentJSLocker&, SlotVisitor& visitor) |
1161 | { |
1162 | UNUSED_PARAM(visitor); |
1163 | |
1164 | #if ENABLE(DFG_JIT) |
1165 | VM& vm = *m_vm; |
1166 | if (vm.heap.isMarked(this)) |
1167 | return; |
1168 | |
1169 | // In rare and weird cases, this could be called on a baseline CodeBlock. One that I found was |
1170 | // that we might decide that the CodeBlock should be jettisoned due to old age, so the |
1171 | // isMarked check doesn't protect us. |
1172 | if (!JITCode::isOptimizingJIT(jitType())) |
1173 | return; |
1174 | |
1175 | DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); |
1176 | // Now check all of our weak references. If all of them are live, then we |
1177 | // have proved liveness and so we scan our strong references. If at end of |
1178 | // GC we still have not proved liveness, then this code block is toast. |
1179 | bool allAreLiveSoFar = true; |
1180 | for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) { |
1181 | JSCell* reference = dfgCommon->weakReferences[i].get(); |
1182 | ASSERT(!jsDynamicCast<CodeBlock*>(vm, reference)); |
1183 | if (!vm.heap.isMarked(reference)) { |
1184 | allAreLiveSoFar = false; |
1185 | break; |
1186 | } |
1187 | } |
1188 | if (allAreLiveSoFar) { |
1189 | for (unsigned i = 0; i < dfgCommon->weakStructureReferences.size(); ++i) { |
1190 | if (!vm.heap.isMarked(dfgCommon->weakStructureReferences[i].get())) { |
1191 | allAreLiveSoFar = false; |
1192 | break; |
1193 | } |
1194 | } |
1195 | } |
1196 | |
1197 | // If some weak references are dead, then this fixpoint iteration was |
1198 | // unsuccessful. |
1199 | if (!allAreLiveSoFar) |
1200 | return; |
1201 | |
1202 | // All weak references are live. Record this information so we don't |
1203 | // come back here again, and scan the strong references. |
1204 | visitor.appendUnbarriered(this); |
1205 | #endif // ENABLE(DFG_JIT) |
1206 | } |
1207 | |
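// Walks the LLInt metadata and drops any inline-cache entries whose cached cells (structures,
// callees, symbol tables) were not marked in this GC cycle, so the caches cannot resurrect dead
// objects.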
1208 | void CodeBlock::finalizeLLIntInlineCaches() |
1209 | { |
1210 | VM& vm = *m_vm; |
1211 | |
1212 | if (m_metadata) { |
1213 | // FIXME: https://bugs.webkit.org/show_bug.cgi?id=166418 |
1214 | // We need to add optimizations for op_resolve_scope_for_hoisting_func_decl_in_eval to do link time scope resolution. |
1215 | |
1216 | m_metadata->forEach<OpGetById>([&] (auto& metadata) { |
1217 | if (metadata.m_modeMetadata.mode != GetByIdMode::Default) |
1218 | return; |
1219 | StructureID oldStructureID = metadata.m_modeMetadata.defaultMode.structureID; |
1220 | if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID))) |
1221 | return; |
1222 | if (Options::verboseOSR()) |
1223 | dataLogF("Clearing LLInt property access.\n" ); |
1224 | LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata); |
1225 | }); |
1226 | |
1227 | m_metadata->forEach<OpGetByIdDirect>([&] (auto& metadata) { |
1228 | StructureID oldStructureID = metadata.m_structureID; |
1229 | if (!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID))) |
1230 | return; |
1231 | if (Options::verboseOSR()) |
1232 | dataLogF("Clearing LLInt property access.\n" ); |
1233 | metadata.m_structureID = 0; |
1234 | metadata.m_offset = 0; |
1235 | }); |
1236 | |
1237 | m_metadata->forEach<OpPutById>([&] (auto& metadata) { |
1238 | StructureID oldStructureID = metadata.m_oldStructureID; |
1239 | StructureID newStructureID = metadata.m_newStructureID; |
1240 | StructureChain* chain = metadata.m_structureChain.get(); |
1241 | if ((!oldStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(oldStructureID))) |
1242 | && (!newStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(newStructureID))) |
1243 | && (!chain || vm.heap.isMarked(chain))) |
1244 | return; |
1245 | if (Options::verboseOSR()) |
1246 | dataLogF("Clearing LLInt put transition.\n" ); |
1247 | metadata.m_oldStructureID = 0; |
1248 | metadata.m_offset = 0; |
1249 | metadata.m_newStructureID = 0; |
1250 | metadata.m_structureChain.clear(); |
1251 | }); |
1252 | |
1253 | m_metadata->forEach<OpToThis>([&] (auto& metadata) { |
1254 | if (!metadata.m_cachedStructureID || vm.heap.isMarked(vm.heap.structureIDTable().get(metadata.m_cachedStructureID))) |
1255 | return; |
1256 | if (Options::verboseOSR()) { |
1257 | Structure* structure = vm.heap.structureIDTable().get(metadata.m_cachedStructureID); |
1258 | dataLogF("Clearing LLInt to_this with structure %p.\n" , structure); |
1259 | } |
1260 | metadata.m_cachedStructureID = 0; |
1261 | metadata.m_toThisStatus = merge(metadata.m_toThisStatus, ToThisClearedByGC); |
1262 | }); |
1263 | |
1264 | auto handleCreateBytecode = [&] (auto& metadata, ASCIILiteral name) { |
1265 | auto& cacheWriteBarrier = metadata.m_cachedCallee; |
1266 | if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects()) |
1267 | return; |
1268 | JSCell* cachedFunction = cacheWriteBarrier.get(); |
1269 | if (vm.heap.isMarked(cachedFunction)) |
1270 | return; |
            dataLogLnIf(Options::verboseOSR(), "Clearing LLInt ", name, " with cached callee ", RawPointer(cachedFunction), ".");
1272 | cacheWriteBarrier.clear(); |
1273 | }; |
1274 | |
        m_metadata->forEach<OpCreateThis>([&] (auto& metadata) {
            handleCreateBytecode(metadata, "op_create_this"_s);
        });
        m_metadata->forEach<OpCreatePromise>([&] (auto& metadata) {
            handleCreateBytecode(metadata, "op_create_promise"_s);
        });
        m_metadata->forEach<OpCreateGenerator>([&] (auto& metadata) {
            handleCreateBytecode(metadata, "op_create_generator"_s);
        });
        m_metadata->forEach<OpCreateAsyncGenerator>([&] (auto& metadata) {
            handleCreateBytecode(metadata, "op_create_async_generator"_s);
        });
1287 | |
1288 | m_metadata->forEach<OpResolveScope>([&] (auto& metadata) { |
1289 | // Right now this isn't strictly necessary. Any symbol tables that this will refer to |
1290 | // are for outer functions, and we refer to those functions strongly, and they refer |
1291 | // to the symbol table strongly. But it's nice to be on the safe side. |
1292 | WriteBarrierBase<SymbolTable>& symbolTable = metadata.m_symbolTable; |
1293 | if (!symbolTable || vm.heap.isMarked(symbolTable.get())) |
1294 | return; |
1295 | if (Options::verboseOSR()) |
1296 | dataLogF("Clearing dead symbolTable %p.\n" , symbolTable.get()); |
1297 | symbolTable.clear(); |
1298 | }); |
1299 | |
1300 | auto handleGetPutFromScope = [&] (auto& metadata) { |
1301 | GetPutInfo getPutInfo = metadata.m_getPutInfo; |
1302 | if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks |
1303 | || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks) |
1304 | return; |
1305 | WriteBarrierBase<Structure>& structure = metadata.m_structure; |
1306 | if (!structure || vm.heap.isMarked(structure.get())) |
1307 | return; |
1308 | if (Options::verboseOSR()) |
1309 | dataLogF("Clearing scope access with structure %p.\n" , structure.get()); |
1310 | structure.clear(); |
1311 | }; |
1312 | |
1313 | m_metadata->forEach<OpGetFromScope>(handleGetPutFromScope); |
1314 | m_metadata->forEach<OpPutToScope>(handleGetPutFromScope); |
1315 | } |
1316 | |
1317 | // We can't just remove all the sets when we clear the caches since we might have created a watchpoint set |
1318 | // then cleared the cache without GCing in between. |
1319 | m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool { |
1320 | auto clear = [&] () { |
1321 | auto& instruction = instructions().at(std::get<1>(pair.key)); |
1322 | OpcodeID opcode = instruction->opcodeID(); |
1323 | if (opcode == op_get_by_id) { |
1324 | if (Options::verboseOSR()) |
1325 | dataLogF("Clearing LLInt property access.\n" ); |
1326 | LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this)); |
1327 | } |
1328 | return true; |
1329 | }; |
1330 | |
1331 | if (!vm.heap.isMarked(vm.heap.structureIDTable().get(std::get<0>(pair.key)))) |
1332 | return clear(); |
1333 | |
1334 | for (const LLIntPrototypeLoadAdaptiveStructureWatchpoint& watchpoint : pair.value) { |
1335 | if (!watchpoint.key().isStillLive(vm)) |
1336 | return clear(); |
1337 | } |
1338 | |
1339 | return false; |
1340 | }); |
1341 | |
1342 | forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) { |
1343 | if (callLinkInfo.isLinked() && !vm.heap.isMarked(callLinkInfo.callee())) { |
1344 | if (Options::verboseOSR()) |
                dataLog("Clearing LLInt call from ", *this, "\n");
1346 | callLinkInfo.unlink(); |
1347 | } |
1348 | if (callLinkInfo.lastSeenCallee() && !vm.heap.isMarked(callLinkInfo.lastSeenCallee())) |
1349 | callLinkInfo.clearLastSeenCallee(); |
1350 | }); |
1351 | } |
1352 | |
1353 | #if ENABLE(JIT) |
1354 | CodeBlock::JITData& CodeBlock::ensureJITDataSlow(const ConcurrentJSLocker&) |
1355 | { |
1356 | ASSERT(!m_jitData); |
1357 | auto jitData = makeUnique<JITData>(); |
    // calleeSaveRegisters() can read m_jitData from the Baseline JIT without taking a lock. That is OK
    // because JITData::m_calleeSaveRegisters is only filled in for DFG and FTL CodeBlocks, but the Baseline
    // JIT must never observe a garbage pointer there. The store-store fence below ensures that JITData's
    // members (including the null-initialized m_calleeSaveRegisters) are fully written before the assignment
    // to m_jitData publishes the object.
1360 | WTF::storeStoreFence(); |
1361 | m_jitData = WTFMove(jitData); |
1362 | return *m_jitData; |
1363 | } |
1364 | |
1365 | void CodeBlock::finalizeBaselineJITInlineCaches() |
1366 | { |
1367 | if (auto* jitData = m_jitData.get()) { |
1368 | for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) |
1369 | callLinkInfo->visitWeak(vm()); |
1370 | |
1371 | for (StructureStubInfo* stubInfo : jitData->m_stubInfos) |
1372 | stubInfo->visitWeakReferences(this); |
1373 | } |
1374 | } |
1375 | #endif |
1376 | |
1377 | void CodeBlock::finalizeUnconditionally(VM& vm) |
1378 | { |
1379 | UNUSED_PARAM(vm); |
1380 | |
1381 | updateAllPredictions(); |
1382 | |
1383 | if (JITCode::couldBeInterpreted(jitType())) |
1384 | finalizeLLIntInlineCaches(); |
1385 | |
1386 | #if ENABLE(JIT) |
1387 | if (!!jitCode()) |
1388 | finalizeBaselineJITInlineCaches(); |
1389 | #endif |
1390 | |
1391 | #if ENABLE(DFG_JIT) |
1392 | if (JITCode::isOptimizingJIT(jitType())) { |
1393 | DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); |
1394 | dfgCommon->recordedStatuses.finalize(vm); |
1395 | } |
1396 | #endif // ENABLE(DFG_JIT) |
1397 | |
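    // When unlinked-code-block jettisoning is on, treat any forward progress of the tier-appropriate
    // execution counter since the last check (or an always-active tier, like the FTL) as activity, and
    // reset the UnlinkedCodeBlock's age so it is not considered for jettisoning.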
1398 | auto updateActivity = [&] { |
1399 | if (!VM::useUnlinkedCodeBlockJettisoning()) |
1400 | return; |
1401 | JITCode* jitCode = m_jitCode.get(); |
1402 | double count = 0; |
1403 | bool alwaysActive = false; |
1404 | switch (JITCode::jitTypeFor(jitCode)) { |
1405 | case JITType::None: |
1406 | case JITType::HostCallThunk: |
1407 | return; |
1408 | case JITType::InterpreterThunk: |
1409 | count = m_llintExecuteCounter.count(); |
1410 | break; |
1411 | case JITType::BaselineJIT: |
1412 | count = m_jitExecuteCounter.count(); |
1413 | break; |
1414 | case JITType::DFGJIT: |
1415 | #if ENABLE(FTL_JIT) |
1416 | count = static_cast<DFG::JITCode*>(jitCode)->tierUpCounter.count(); |
1417 | #else |
1418 | alwaysActive = true; |
1419 | #endif |
1420 | break; |
1421 | case JITType::FTLJIT: |
1422 | alwaysActive = true; |
1423 | break; |
1424 | } |
1425 | if (alwaysActive || m_previousCounter < count) { |
            // The CodeBlock is active right now, so reset the UnlinkedCodeBlock's age.
1427 | m_unlinkedCode->resetAge(); |
1428 | } |
1429 | m_previousCounter = count; |
1430 | }; |
1431 | updateActivity(); |
1432 | |
1433 | VM::SpaceAndSet::setFor(*subspace()).remove(this); |
1434 | } |
1435 | |
1436 | void CodeBlock::destroy(JSCell* cell) |
1437 | { |
1438 | static_cast<CodeBlock*>(cell)->~CodeBlock(); |
1439 | } |
1440 | |
1441 | void CodeBlock::getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result) |
1442 | { |
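    // Gathers, keyed by CodeOrigin, this block's stub infos, call link infos, and by-val infos, plus any
    // call/get/put/in statuses the DFG recorded, so a caller can inspect every inline cache in one pass.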
1443 | #if ENABLE(JIT) |
1444 | if (JITCode::isJIT(jitType())) { |
1445 | if (auto* jitData = m_jitData.get()) { |
1446 | for (StructureStubInfo* stubInfo : jitData->m_stubInfos) |
1447 | result.add(stubInfo->codeOrigin, ICStatus()).iterator->value.stubInfo = stubInfo; |
1448 | for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) |
1449 | result.add(callLinkInfo->codeOrigin(), ICStatus()).iterator->value.callLinkInfo = callLinkInfo; |
1450 | for (ByValInfo* byValInfo : jitData->m_byValInfos) |
1451 | result.add(CodeOrigin(byValInfo->bytecodeIndex), ICStatus()).iterator->value.byValInfo = byValInfo; |
1452 | } |
1453 | #if ENABLE(DFG_JIT) |
1454 | if (JITCode::isOptimizingJIT(jitType())) { |
1455 | DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); |
1456 | for (auto& pair : dfgCommon->recordedStatuses.calls) |
1457 | result.add(pair.first, ICStatus()).iterator->value.callStatus = pair.second.get(); |
1458 | for (auto& pair : dfgCommon->recordedStatuses.gets) |
1459 | result.add(pair.first, ICStatus()).iterator->value.getStatus = pair.second.get(); |
1460 | for (auto& pair : dfgCommon->recordedStatuses.puts) |
1461 | result.add(pair.first, ICStatus()).iterator->value.putStatus = pair.second.get(); |
1462 | for (auto& pair : dfgCommon->recordedStatuses.ins) |
1463 | result.add(pair.first, ICStatus()).iterator->value.inStatus = pair.second.get(); |
1464 | } |
1465 | #endif |
1466 | } |
1467 | #else |
1468 | UNUSED_PARAM(result); |
1469 | #endif |
1470 | } |
1471 | |
1472 | void CodeBlock::getICStatusMap(ICStatusMap& result) |
1473 | { |
1474 | ConcurrentJSLocker locker(m_lock); |
1475 | getICStatusMap(locker, result); |
1476 | } |
1477 | |
1478 | #if ENABLE(JIT) |
1479 | StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType) |
1480 | { |
1481 | ConcurrentJSLocker locker(m_lock); |
1482 | return ensureJITData(locker).m_stubInfos.add(accessType); |
1483 | } |
1484 | |
1485 | JITAddIC* CodeBlock::addJITAddIC(BinaryArithProfile* arithProfile) |
1486 | { |
1487 | ConcurrentJSLocker locker(m_lock); |
1488 | return ensureJITData(locker).m_addICs.add(arithProfile); |
1489 | } |
1490 | |
1491 | JITMulIC* CodeBlock::addJITMulIC(BinaryArithProfile* arithProfile) |
1492 | { |
1493 | ConcurrentJSLocker locker(m_lock); |
1494 | return ensureJITData(locker).m_mulICs.add(arithProfile); |
1495 | } |
1496 | |
1497 | JITSubIC* CodeBlock::addJITSubIC(BinaryArithProfile* arithProfile) |
1498 | { |
1499 | ConcurrentJSLocker locker(m_lock); |
1500 | return ensureJITData(locker).m_subICs.add(arithProfile); |
1501 | } |
1502 | |
1503 | JITNegIC* CodeBlock::addJITNegIC(UnaryArithProfile* arithProfile) |
1504 | { |
1505 | ConcurrentJSLocker locker(m_lock); |
1506 | return ensureJITData(locker).m_negICs.add(arithProfile); |
1507 | } |
1508 | |
1509 | StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin) |
1510 | { |
1511 | ConcurrentJSLocker locker(m_lock); |
1512 | if (auto* jitData = m_jitData.get()) { |
1513 | for (StructureStubInfo* stubInfo : jitData->m_stubInfos) { |
1514 | if (stubInfo->codeOrigin == codeOrigin) |
1515 | return stubInfo; |
1516 | } |
1517 | } |
1518 | return nullptr; |
1519 | } |
1520 | |
1521 | ByValInfo* CodeBlock::addByValInfo() |
1522 | { |
1523 | ConcurrentJSLocker locker(m_lock); |
1524 | return ensureJITData(locker).m_byValInfos.add(); |
1525 | } |
1526 | |
1527 | CallLinkInfo* CodeBlock::addCallLinkInfo() |
1528 | { |
1529 | ConcurrentJSLocker locker(m_lock); |
1530 | return ensureJITData(locker).m_callLinkInfos.add(); |
1531 | } |
1532 | |
1533 | CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(BytecodeIndex index) |
1534 | { |
1535 | ConcurrentJSLocker locker(m_lock); |
1536 | if (auto* jitData = m_jitData.get()) { |
1537 | for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) { |
1538 | if (callLinkInfo->codeOrigin() == CodeOrigin(index)) |
1539 | return callLinkInfo; |
1540 | } |
1541 | } |
1542 | return nullptr; |
1543 | } |
1544 | |
1545 | RareCaseProfile* CodeBlock::addRareCaseProfile(BytecodeIndex bytecodeIndex) |
1546 | { |
1547 | ConcurrentJSLocker locker(m_lock); |
1548 | auto& jitData = ensureJITData(locker); |
1549 | jitData.m_rareCaseProfiles.append(RareCaseProfile(bytecodeIndex)); |
1550 | return &jitData.m_rareCaseProfiles.last(); |
1551 | } |
1552 | |
1553 | RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeIndex(const ConcurrentJSLocker&, BytecodeIndex bytecodeIndex) |
1554 | { |
1555 | if (auto* jitData = m_jitData.get()) { |
1556 | return tryBinarySearch<RareCaseProfile, BytecodeIndex>( |
1557 | jitData->m_rareCaseProfiles, jitData->m_rareCaseProfiles.size(), bytecodeIndex, |
1558 | getRareCaseProfileBytecodeIndex); |
1559 | } |
1560 | return nullptr; |
1561 | } |
1562 | |
1563 | unsigned CodeBlock::rareCaseProfileCountForBytecodeIndex(const ConcurrentJSLocker& locker, BytecodeIndex bytecodeIndex) |
1564 | { |
1565 | RareCaseProfile* profile = rareCaseProfileForBytecodeIndex(locker, bytecodeIndex); |
1566 | if (profile) |
1567 | return profile->m_counter; |
1568 | return 0; |
1569 | } |
1570 | |
1571 | void CodeBlock::setCalleeSaveRegisters(RegisterSet calleeSaveRegisters) |
1572 | { |
1573 | ConcurrentJSLocker locker(m_lock); |
1574 | ensureJITData(locker).m_calleeSaveRegisters = makeUnique<RegisterAtOffsetList>(calleeSaveRegisters); |
1575 | } |
1576 | |
1577 | void CodeBlock::setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList> registerAtOffsetList) |
1578 | { |
1579 | ConcurrentJSLocker locker(m_lock); |
1580 | ensureJITData(locker).m_calleeSaveRegisters = WTFMove(registerAtOffsetList); |
1581 | } |
1582 | |
1583 | void CodeBlock::resetJITData() |
1584 | { |
1585 | RELEASE_ASSERT(!JITCode::isJIT(jitType())); |
1586 | ConcurrentJSLocker locker(m_lock); |
1587 | |
1588 | if (auto* jitData = m_jitData.get()) { |
1589 | // We can clear these because no other thread will have references to any stub infos, call |
1590 | // link infos, or by val infos if we don't have JIT code. Attempts to query these data |
1591 | // structures using the concurrent API (getICStatusMap and friends) will return nothing if we |
1592 | // don't have JIT code. |
1593 | jitData->m_stubInfos.clear(); |
1594 | jitData->m_callLinkInfos.clear(); |
1595 | jitData->m_byValInfos.clear(); |
1596 | // We can clear this because the DFG's queries to these data structures are guarded by whether |
1597 | // there is JIT code. |
1598 | jitData->m_rareCaseProfiles.clear(); |
1599 | } |
1600 | } |
1601 | #endif |
1602 | |
1603 | void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor) |
1604 | { |
1605 | // We strongly visit OSR exits targets because we don't want to deal with |
1606 | // the complexity of generating an exit target CodeBlock on demand and |
1607 | // guaranteeing that it matches the details of the CodeBlock we compiled |
1608 | // the OSR exit against. |
1609 | |
1610 | visitor.append(m_alternative); |
1611 | |
1612 | #if ENABLE(DFG_JIT) |
1613 | DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); |
1614 | if (dfgCommon->inlineCallFrames) { |
1615 | for (auto* inlineCallFrame : *dfgCommon->inlineCallFrames) { |
1616 | ASSERT(inlineCallFrame->baselineCodeBlock); |
1617 | visitor.append(inlineCallFrame->baselineCodeBlock); |
1618 | } |
1619 | } |
1620 | #endif |
1621 | } |
1622 | |
1623 | void CodeBlock::stronglyVisitStrongReferences(const ConcurrentJSLocker& locker, SlotVisitor& visitor) |
1624 | { |
1625 | UNUSED_PARAM(locker); |
1626 | |
1627 | visitor.append(m_globalObject); |
1628 | visitor.append(m_ownerExecutable); // This is extra important since it causes the ExecutableToCodeBlockEdge to be marked. |
1629 | visitor.append(m_unlinkedCode); |
1630 | if (m_rareData) |
1631 | m_rareData->m_directEvalCodeCache.visitAggregate(visitor); |
1632 | visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size()); |
1633 | for (auto& functionExpr : m_functionExprs) |
1634 | visitor.append(functionExpr); |
1635 | for (auto& functionDecl : m_functionDecls) |
1636 | visitor.append(functionDecl); |
1637 | forEachObjectAllocationProfile([&](ObjectAllocationProfile& objectAllocationProfile) { |
1638 | objectAllocationProfile.visitAggregate(visitor); |
1639 | }); |
1640 | |
1641 | #if ENABLE(JIT) |
1642 | if (auto* jitData = m_jitData.get()) { |
1643 | for (ByValInfo* byValInfo : jitData->m_byValInfos) |
1644 | visitor.append(byValInfo->cachedSymbol); |
1645 | } |
1646 | #endif |
1647 | |
1648 | #if ENABLE(DFG_JIT) |
1649 | if (JITCode::isOptimizingJIT(jitType())) |
1650 | visitOSRExitTargets(locker, visitor); |
1651 | #endif |
1652 | } |
1653 | |
1654 | void CodeBlock::stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor& visitor) |
1655 | { |
1656 | UNUSED_PARAM(visitor); |
1657 | |
1658 | #if ENABLE(DFG_JIT) |
1659 | if (!JITCode::isOptimizingJIT(jitType())) |
1660 | return; |
1661 | |
1662 | DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); |
1663 | |
1664 | for (auto& transition : dfgCommon->transitions) { |
1665 | if (!!transition.m_codeOrigin) |
1666 | visitor.append(transition.m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though. |
1667 | visitor.append(transition.m_from); |
1668 | visitor.append(transition.m_to); |
1669 | } |
1670 | |
1671 | for (auto& weakReference : dfgCommon->weakReferences) |
1672 | visitor.append(weakReference); |
1673 | |
1674 | for (auto& weakStructureReference : dfgCommon->weakStructureReferences) |
1675 | visitor.append(weakStructureReference); |
1676 | |
1677 | dfgCommon->livenessHasBeenProved = true; |
1678 | #endif |
1679 | } |
1680 | |
1681 | CodeBlock* CodeBlock::baselineAlternative() |
1682 | { |
1683 | #if ENABLE(JIT) |
1684 | CodeBlock* result = this; |
1685 | while (result->alternative()) |
1686 | result = result->alternative(); |
1687 | RELEASE_ASSERT(result); |
1688 | RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITType::None); |
1689 | return result; |
1690 | #else |
1691 | return this; |
1692 | #endif |
1693 | } |
1694 | |
1695 | CodeBlock* CodeBlock::baselineVersion() |
1696 | { |
1697 | #if ENABLE(JIT) |
1698 | JITType selfJITType = jitType(); |
1699 | if (JITCode::isBaselineCode(selfJITType)) |
1700 | return this; |
1701 | CodeBlock* result = replacement(); |
1702 | if (!result) { |
1703 | if (JITCode::isOptimizingJIT(selfJITType)) { |
1704 | // The replacement can be null if we've had a memory clean up and the executable |
1705 | // has been purged of its codeBlocks (see ExecutableBase::clearCode()). Regardless, |
1706 | // the current codeBlock is still live on the stack, and as an optimizing JIT |
1707 | // codeBlock, it will keep its baselineAlternative() alive for us to fetch below. |
1708 | result = this; |
1709 | } else { |
1710 | // This can happen if we're creating the original CodeBlock for an executable. |
1711 | // Assume that we're the baseline CodeBlock. |
1712 | RELEASE_ASSERT(selfJITType == JITType::None); |
1713 | return this; |
1714 | } |
1715 | } |
1716 | result = result->baselineAlternative(); |
1717 | ASSERT(result); |
1718 | return result; |
1719 | #else |
1720 | return this; |
1721 | #endif |
1722 | } |
1723 | |
1724 | #if ENABLE(JIT) |
1725 | bool CodeBlock::hasOptimizedReplacement(JITType typeToReplace) |
1726 | { |
1727 | CodeBlock* replacement = this->replacement(); |
1728 | return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace); |
1729 | } |
1730 | |
1731 | bool CodeBlock::hasOptimizedReplacement() |
1732 | { |
1733 | return hasOptimizedReplacement(jitType()); |
1734 | } |
1735 | #endif |
1736 | |
1737 | HandlerInfo* CodeBlock::handlerForBytecodeIndex(BytecodeIndex bytecodeIndex, RequiredHandler requiredHandler) |
1738 | { |
1739 | RELEASE_ASSERT(bytecodeIndex.offset() < instructions().size()); |
1740 | return handlerForIndex(bytecodeIndex.offset(), requiredHandler); |
1741 | } |
1742 | |
1743 | HandlerInfo* CodeBlock::handlerForIndex(unsigned index, RequiredHandler requiredHandler) |
1744 | { |
1745 | if (!m_rareData) |
        return nullptr;
1747 | return HandlerInfo::handlerForIndex(m_rareData->m_exceptionHandlers, index, requiredHandler); |
1748 | } |
1749 | |
1750 | DisposableCallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite) |
1751 | { |
1752 | #if ENABLE(DFG_JIT) |
1753 | RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType())); |
1754 | RELEASE_ASSERT(canGetCodeOrigin(originalCallSite)); |
1755 | ASSERT(!!handlerForIndex(originalCallSite.bits())); |
1756 | CodeOrigin originalOrigin = codeOrigin(originalCallSite); |
1757 | return m_jitCode->dfgCommon()->addDisposableCallSiteIndex(originalOrigin); |
1758 | #else |
1759 | // We never create new on-the-fly exception handling |
1760 | // call sites outside the DFG/FTL inline caches. |
1761 | UNUSED_PARAM(originalCallSite); |
1762 | RELEASE_ASSERT_NOT_REACHED(); |
1763 | return DisposableCallSiteIndex(0u); |
1764 | #endif |
1765 | } |
1766 | |
1769 | void CodeBlock::ensureCatchLivenessIsComputedForBytecodeIndex(BytecodeIndex bytecodeIndex) |
1770 | { |
1771 | auto& instruction = instructions().at(bytecodeIndex); |
1772 | OpCatch op = instruction->as<OpCatch>(); |
1773 | auto& metadata = op.metadata(this); |
1774 | if (!!metadata.m_buffer) { |
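        // The profile buffer already exists. In assertion-enabled builds, verify that it is one of the
        // buffers tracked by our rare data before taking the fast path out.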
1775 | #if !ASSERT_DISABLED |
1776 | ConcurrentJSLocker locker(m_lock); |
1777 | bool found = false; |
1778 | auto* rareData = m_rareData.get(); |
1779 | ASSERT(rareData); |
1780 | for (auto& profile : rareData->m_catchProfiles) { |
1781 | if (profile.get() == metadata.m_buffer) { |
1782 | found = true; |
1783 | break; |
1784 | } |
1785 | } |
1786 | ASSERT(found); |
1787 | #endif |
1788 | return; |
1789 | } |
1790 | |
1791 | ensureCatchLivenessIsComputedForBytecodeIndexSlow(op, bytecodeIndex); |
1792 | } |
1793 | |
1794 | void CodeBlock::ensureCatchLivenessIsComputedForBytecodeIndexSlow(const OpCatch& op, BytecodeIndex bytecodeIndex) |
1795 | { |
1796 | BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis(); |
1797 | |
1798 | // We get the live-out set of variables at op_catch, not the live-in. This |
1799 | // is because the variables that the op_catch defines might be dead, and |
1800 | // we can avoid profiling them and extracting them when doing OSR entry |
1801 | // into the DFG. |
1802 | |
1803 | auto nextOffset = instructions().at(bytecodeIndex).next().offset(); |
1804 | FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeIndex(this, BytecodeIndex(nextOffset)); |
1805 | Vector<VirtualRegister> liveOperands; |
1806 | liveOperands.reserveInitialCapacity(liveLocals.bitCount()); |
1807 | liveLocals.forEachSetBit([&] (unsigned liveLocal) { |
1808 | liveOperands.append(virtualRegisterForLocal(liveLocal)); |
1809 | }); |
1810 | |
1811 | for (int i = 0; i < numParameters(); ++i) |
1812 | liveOperands.append(virtualRegisterForArgument(i)); |
1813 | |
1814 | auto profiles = makeUnique<ValueProfileAndOperandBuffer>(liveOperands.size()); |
1815 | RELEASE_ASSERT(profiles->m_size == liveOperands.size()); |
1816 | for (unsigned i = 0; i < profiles->m_size; ++i) |
1817 | profiles->m_buffer.get()[i].m_operand = liveOperands[i].offset(); |
1818 | |
1819 | createRareDataIfNecessary(); |
1820 | |
1821 | // The compiler thread will read this pointer value and then proceed to dereference it |
1822 | // if it is not null. We need to make sure all above stores happen before this store so |
1823 | // the compiler thread reads fully initialized data. |
1824 | WTF::storeStoreFence(); |
1825 | |
1826 | op.metadata(this).m_buffer = profiles.get(); |
1827 | { |
1828 | ConcurrentJSLocker locker(m_lock); |
1829 | m_rareData->m_catchProfiles.append(WTFMove(profiles)); |
1830 | } |
1831 | } |
1832 | |
1833 | void CodeBlock::removeExceptionHandlerForCallSite(DisposableCallSiteIndex callSiteIndex) |
1834 | { |
1835 | RELEASE_ASSERT(m_rareData); |
1836 | Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers; |
1837 | unsigned index = callSiteIndex.bits(); |
1838 | for (size_t i = 0; i < exceptionHandlers.size(); ++i) { |
1839 | HandlerInfo& handler = exceptionHandlers[i]; |
1840 | if (handler.start <= index && handler.end > index) { |
1841 | exceptionHandlers.remove(i); |
1842 | return; |
1843 | } |
1844 | } |
1845 | |
1846 | RELEASE_ASSERT_NOT_REACHED(); |
1847 | } |
1848 | |
1849 | unsigned CodeBlock::lineNumberForBytecodeIndex(BytecodeIndex bytecodeIndex) |
1850 | { |
1851 | RELEASE_ASSERT(bytecodeIndex.offset() < instructions().size()); |
1852 | return ownerExecutable()->firstLine() + m_unlinkedCode->lineNumberForBytecodeIndex(bytecodeIndex); |
1853 | } |
1854 | |
1855 | unsigned CodeBlock::columnNumberForBytecodeIndex(BytecodeIndex bytecodeIndex) |
1856 | { |
1857 | int divot; |
1858 | int startOffset; |
1859 | int endOffset; |
1860 | unsigned line; |
1861 | unsigned column; |
1862 | expressionRangeForBytecodeIndex(bytecodeIndex, divot, startOffset, endOffset, line, column); |
1863 | return column; |
1864 | } |
1865 | |
1866 | void CodeBlock::expressionRangeForBytecodeIndex(BytecodeIndex bytecodeIndex, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const |
1867 | { |
1868 | m_unlinkedCode->expressionRangeForBytecodeIndex(bytecodeIndex, divot, startOffset, endOffset, line, column); |
1869 | divot += sourceOffset(); |
1870 | column += line ? 1 : firstLineColumnOffset(); |
1871 | line += ownerExecutable()->firstLine(); |
1872 | } |
1873 | |
1874 | bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, Optional<unsigned> column) |
1875 | { |
1876 | const InstructionStream& instructionStream = instructions(); |
1877 | for (const auto& it : instructionStream) { |
1878 | if (it->is<OpDebug>()) { |
1879 | int unused; |
1880 | unsigned opDebugLine; |
1881 | unsigned opDebugColumn; |
1882 | expressionRangeForBytecodeIndex(it.index(), unused, unused, unused, opDebugLine, opDebugColumn); |
1883 | if (line == opDebugLine && (!column || column == opDebugColumn)) |
1884 | return true; |
1885 | } |
1886 | } |
1887 | return false; |
1888 | } |
1889 | |
1890 | void CodeBlock::shrinkToFit(ShrinkMode shrinkMode) |
1891 | { |
1892 | ConcurrentJSLocker locker(m_lock); |
1893 | |
1894 | #if ENABLE(JIT) |
1895 | if (auto* jitData = m_jitData.get()) |
1896 | jitData->m_rareCaseProfiles.shrinkToFit(); |
1897 | #endif |
1898 | |
1899 | if (shrinkMode == EarlyShrink) { |
1900 | m_constantRegisters.shrinkToFit(); |
1901 | m_constantsSourceCodeRepresentation.shrinkToFit(); |
1902 | |
1903 | if (m_rareData) { |
1904 | m_rareData->m_switchJumpTables.shrinkToFit(); |
1905 | m_rareData->m_stringSwitchJumpTables.shrinkToFit(); |
1906 | } |
    } // else don't shrink these tables, because pointers into them may already have been handed out.
1908 | } |
1909 | |
1910 | #if ENABLE(JIT) |
1911 | void CodeBlock::linkIncomingCall(CallFrame* callerFrame, CallLinkInfo* incoming) |
1912 | { |
1913 | noticeIncomingCall(callerFrame); |
1914 | ConcurrentJSLocker locker(m_lock); |
1915 | ensureJITData(locker).m_incomingCalls.push(incoming); |
1916 | } |
1917 | |
1918 | void CodeBlock::linkIncomingPolymorphicCall(CallFrame* callerFrame, PolymorphicCallNode* incoming) |
1919 | { |
1920 | noticeIncomingCall(callerFrame); |
1921 | { |
1922 | ConcurrentJSLocker locker(m_lock); |
1923 | ensureJITData(locker).m_incomingPolymorphicCalls.push(incoming); |
1924 | } |
1925 | } |
1926 | #endif // ENABLE(JIT) |
1927 | |
1928 | void CodeBlock::unlinkIncomingCalls() |
1929 | { |
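    // Unlink every caller that linked itself to this CodeBlock: LLInt callers first, then, if JIT data
    // exists, JIT callers and polymorphic call stubs.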
1930 | while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end()) |
1931 | m_incomingLLIntCalls.begin()->unlink(); |
1932 | #if ENABLE(JIT) |
1933 | JITData* jitData = nullptr; |
1934 | { |
1935 | ConcurrentJSLocker locker(m_lock); |
1936 | jitData = m_jitData.get(); |
1937 | } |
1938 | if (jitData) { |
1939 | while (jitData->m_incomingCalls.begin() != jitData->m_incomingCalls.end()) |
1940 | jitData->m_incomingCalls.begin()->unlink(vm()); |
1941 | while (jitData->m_incomingPolymorphicCalls.begin() != jitData->m_incomingPolymorphicCalls.end()) |
1942 | jitData->m_incomingPolymorphicCalls.begin()->unlink(vm()); |
1943 | } |
1944 | #endif // ENABLE(JIT) |
1945 | } |
1946 | |
1947 | void CodeBlock::linkIncomingCall(CallFrame* callerFrame, LLIntCallLinkInfo* incoming) |
1948 | { |
1949 | noticeIncomingCall(callerFrame); |
1950 | m_incomingLLIntCalls.push(incoming); |
1951 | } |
1952 | |
1953 | CodeBlock* CodeBlock::newReplacement() |
1954 | { |
1955 | return ownerExecutable()->newReplacementCodeBlockFor(specializationKind()); |
1956 | } |
1957 | |
1958 | #if ENABLE(JIT) |
1959 | CodeBlock* CodeBlock::replacement() |
1960 | { |
1961 | const ClassInfo* classInfo = this->classInfo(vm()); |
1962 | |
1963 | if (classInfo == FunctionCodeBlock::info()) |
1964 | return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(isConstructor() ? CodeForConstruct : CodeForCall); |
1965 | |
1966 | if (classInfo == EvalCodeBlock::info()) |
1967 | return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock(); |
1968 | |
1969 | if (classInfo == ProgramCodeBlock::info()) |
1970 | return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock(); |
1971 | |
1972 | if (classInfo == ModuleProgramCodeBlock::info()) |
1973 | return jsCast<ModuleProgramExecutable*>(ownerExecutable())->codeBlock(); |
1974 | |
1975 | RELEASE_ASSERT_NOT_REACHED(); |
1976 | return nullptr; |
1977 | } |
1978 | |
1979 | DFG::CapabilityLevel CodeBlock::computeCapabilityLevel() |
1980 | { |
1981 | const ClassInfo* classInfo = this->classInfo(vm()); |
1982 | |
1983 | if (classInfo == FunctionCodeBlock::info()) { |
1984 | if (isConstructor()) |
1985 | return DFG::functionForConstructCapabilityLevel(this); |
1986 | return DFG::functionForCallCapabilityLevel(this); |
1987 | } |
1988 | |
1989 | if (classInfo == EvalCodeBlock::info()) |
1990 | return DFG::evalCapabilityLevel(this); |
1991 | |
1992 | if (classInfo == ProgramCodeBlock::info()) |
1993 | return DFG::programCapabilityLevel(this); |
1994 | |
1995 | if (classInfo == ModuleProgramCodeBlock::info()) |
1996 | return DFG::programCapabilityLevel(this); |
1997 | |
1998 | RELEASE_ASSERT_NOT_REACHED(); |
1999 | return DFG::CannotCompile; |
2000 | } |
2001 | |
2002 | #endif // ENABLE(JIT) |
2003 | |
2004 | void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode, const FireDetail* detail) |
2005 | { |
2006 | #if !ENABLE(DFG_JIT) |
2007 | UNUSED_PARAM(mode); |
2008 | UNUSED_PARAM(detail); |
2009 | #endif |
2010 | |
2011 | VM& vm = *m_vm; |
2012 | |
    CodeBlock* codeBlock = this; // Placate GCC: CODEBLOCK_LOG_EVENT does not like `this` being used in it directly.
    CODEBLOCK_LOG_EVENT(codeBlock, "jettison", ("due to ", reason, ", counting = ", mode == CountReoptimization, ", detail = ", pointerDump(detail)));
2015 | |
2016 | RELEASE_ASSERT(reason != Profiler::NotJettisoned); |
2017 | |
2018 | #if ENABLE(DFG_JIT) |
    if (DFG::shouldDumpDisassembly()) {
        dataLog("Jettisoning ", *this);
        if (mode == CountReoptimization)
            dataLog(" and counting reoptimization");
        dataLog(" due to ", reason);
        if (detail)
            dataLog(", ", *detail);
        dataLog(".\n");
    }
2028 | |
2029 | if (reason == Profiler::JettisonDueToWeakReference) { |
2030 | if (DFG::shouldDumpDisassembly()) { |
            dataLog(*this, " will be jettisoned because of the following dead references:\n");
2032 | DFG::CommonData* dfgCommon = m_jitCode->dfgCommon(); |
2033 | for (auto& transition : dfgCommon->transitions) { |
2034 | JSCell* origin = transition.m_codeOrigin.get(); |
2035 | JSCell* from = transition.m_from.get(); |
2036 | JSCell* to = transition.m_to.get(); |
2037 | if ((!origin || vm.heap.isMarked(origin)) && vm.heap.isMarked(from)) |
2038 | continue; |
                dataLog(" Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
2040 | } |
2041 | for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) { |
2042 | JSCell* weak = dfgCommon->weakReferences[i].get(); |
2043 | if (vm.heap.isMarked(weak)) |
2044 | continue; |
                dataLog(" Weak reference ", RawPointer(weak), ".\n");
2046 | } |
2047 | } |
2048 | } |
2049 | #endif // ENABLE(DFG_JIT) |
2050 | |
2051 | DeferGCForAWhile deferGC(*heap()); |
2052 | |
2053 | // We want to accomplish two things here: |
2054 | // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it |
2055 | // we should OSR exit at the top of the next bytecode instruction after the return. |
2056 | // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock. |
2057 | |
2058 | #if ENABLE(DFG_JIT) |
2059 | if (JITCode::isOptimizingJIT(jitType())) |
2060 | jitCode()->dfgCommon()->clearWatchpoints(); |
2061 | |
2062 | if (reason != Profiler::JettisonDueToOldAge) { |
2063 | Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get(); |
2064 | if (UNLIKELY(compilation)) |
2065 | compilation->setJettisonReason(reason, detail); |
2066 | |
2067 | // This accomplishes (1), and does its own book-keeping about whether it has already happened. |
2068 | if (!jitCode()->dfgCommon()->invalidate()) { |
2069 | // We've already been invalidated. |
2070 | RELEASE_ASSERT(this != replacement() || (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable()))); |
2071 | return; |
2072 | } |
2073 | } |
2074 | |
2075 | if (DFG::shouldDumpDisassembly()) |
        dataLog(" Did invalidate ", *this, "\n");
2077 | |
2078 | // Count the reoptimization if that's what the user wanted. |
2079 | if (mode == CountReoptimization) { |
2080 | // FIXME: Maybe this should call alternative(). |
2081 | // https://bugs.webkit.org/show_bug.cgi?id=123677 |
2082 | baselineAlternative()->countReoptimization(); |
2083 | if (DFG::shouldDumpDisassembly()) |
            dataLog(" Did count reoptimization for ", *this, "\n");
2085 | } |
2086 | |
2087 | if (this != replacement()) { |
2088 | // This means that we were never the entrypoint. This can happen for OSR entry code |
2089 | // blocks. |
2090 | return; |
2091 | } |
2092 | |
2093 | if (alternative()) |
2094 | alternative()->optimizeAfterWarmUp(); |
2095 | |
2096 | if (reason != Profiler::JettisonDueToOldAge && reason != Profiler::JettisonDueToVMTraps) |
2097 | tallyFrequentExitSites(); |
2098 | #endif // ENABLE(DFG_JIT) |
2099 | |
    // Jettison can happen during GC. We don't want to install code into a dead executable,
    // because that would add a dead object to the remembered set.
2102 | if (vm.heap.isCurrentThreadBusy() && !vm.heap.isMarked(ownerExecutable())) |
2103 | return; |
2104 | |
2105 | #if ENABLE(JIT) |
2106 | { |
2107 | ConcurrentJSLocker locker(m_lock); |
2108 | if (JITData* jitData = m_jitData.get()) { |
2109 | for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) |
2110 | callLinkInfo->setClearedByJettison(); |
2111 | } |
2112 | } |
2113 | #endif |
2114 | |
2115 | // This accomplishes (2). |
2116 | ownerExecutable()->installCode(vm, alternative(), codeType(), specializationKind()); |
2117 | |
2118 | #if ENABLE(DFG_JIT) |
2119 | if (DFG::shouldDumpDisassembly()) |
        dataLog(" Did install baseline version of ", *this, "\n");
2121 | #endif // ENABLE(DFG_JIT) |
2122 | } |
2123 | |
2124 | JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin) |
2125 | { |
2126 | auto* inlineCallFrame = codeOrigin.inlineCallFrame(); |
2127 | if (!inlineCallFrame) |
2128 | return globalObject(); |
2129 | return inlineCallFrame->baselineCodeBlock->globalObject(); |
2130 | } |
2131 | |
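// Walks the stack from a given start frame and reports whether, within the next depthToCheck frames,
// some frame is already running the given CodeBlock, i.e. whether the incoming call would recurse.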
2132 | class RecursionCheckFunctor { |
2133 | public: |
2134 | RecursionCheckFunctor(CallFrame* startCallFrame, CodeBlock* codeBlock, unsigned depthToCheck) |
2135 | : m_startCallFrame(startCallFrame) |
2136 | , m_codeBlock(codeBlock) |
2137 | , m_depthToCheck(depthToCheck) |
2138 | , m_foundStartCallFrame(false) |
2139 | , m_didRecurse(false) |
2140 | { } |
2141 | |
2142 | StackVisitor::Status operator()(StackVisitor& visitor) const |
2143 | { |
2144 | CallFrame* currentCallFrame = visitor->callFrame(); |
2145 | |
2146 | if (currentCallFrame == m_startCallFrame) |
2147 | m_foundStartCallFrame = true; |
2148 | |
2149 | if (m_foundStartCallFrame) { |
2150 | if (visitor->callFrame()->codeBlock() == m_codeBlock) { |
2151 | m_didRecurse = true; |
2152 | return StackVisitor::Done; |
2153 | } |
2154 | |
2155 | if (!m_depthToCheck--) |
2156 | return StackVisitor::Done; |
2157 | } |
2158 | |
2159 | return StackVisitor::Continue; |
2160 | } |
2161 | |
2162 | bool didRecurse() const { return m_didRecurse; } |
2163 | |
2164 | private: |
2165 | CallFrame* m_startCallFrame; |
2166 | CodeBlock* m_codeBlock; |
2167 | mutable unsigned m_depthToCheck; |
2168 | mutable bool m_foundStartCallFrame; |
2169 | mutable bool m_didRecurse; |
2170 | }; |
2171 | |
2172 | void CodeBlock::noticeIncomingCall(CallFrame* callerFrame) |
2173 | { |
2174 | CodeBlock* callerCodeBlock = callerFrame->codeBlock(); |
2175 | |
2176 | if (Options::verboseCallLink()) |
        dataLog("Noticing call link from ", pointerDump(callerCodeBlock), " to ", *this, "\n");
2178 | |
2179 | #if ENABLE(DFG_JIT) |
2180 | if (!m_shouldAlwaysBeInlined) |
2181 | return; |
2182 | |
2183 | if (!callerCodeBlock) { |
2184 | m_shouldAlwaysBeInlined = false; |
2185 | if (Options::verboseCallLink()) |
            dataLog(" Clearing SABI because caller is native.\n");
2187 | return; |
2188 | } |
2189 | |
2190 | if (!hasBaselineJITProfiling()) |
2191 | return; |
2192 | |
2193 | if (!DFG::mightInlineFunction(this)) |
2194 | return; |
2195 | |
2196 | if (!canInline(capabilityLevelState())) |
2197 | return; |
2198 | |
2199 | if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) { |
2200 | m_shouldAlwaysBeInlined = false; |
2201 | if (Options::verboseCallLink()) |
            dataLog(" Clearing SABI because caller is too large.\n");
2203 | return; |
2204 | } |
2205 | |
2206 | if (callerCodeBlock->jitType() == JITType::InterpreterThunk) { |
2207 | // If the caller is still in the interpreter, then we can't expect inlining to |
2208 | // happen anytime soon. Assume it's profitable to optimize it separately. This |
2209 | // ensures that a function is SABI only if it is called no more frequently than |
2210 | // any of its callers. |
2211 | m_shouldAlwaysBeInlined = false; |
2212 | if (Options::verboseCallLink()) |
            dataLog(" Clearing SABI because caller is in LLInt.\n");
2214 | return; |
2215 | } |
2216 | |
2217 | if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) { |
2218 | m_shouldAlwaysBeInlined = false; |
2219 | if (Options::verboseCallLink()) |
            dataLog(" Clearing SABI because caller was already optimized.\n");
2221 | return; |
2222 | } |
2223 | |
2224 | if (callerCodeBlock->codeType() != FunctionCode) { |
        // If the caller is either eval or global code, assume that it won't be
2226 | // optimized anytime soon. For eval code this is particularly true since we |
2227 | // delay eval optimization by a *lot*. |
2228 | m_shouldAlwaysBeInlined = false; |
2229 | if (Options::verboseCallLink()) |
            dataLog(" Clearing SABI because caller is not a function.\n");
2231 | return; |
2232 | } |
2233 | |
2234 | // Recursive calls won't be inlined. |
2235 | RecursionCheckFunctor functor(callerFrame, this, Options::maximumInliningDepth()); |
2236 | vm().topCallFrame->iterate(vm(), functor); |
2237 | |
2238 | if (functor.didRecurse()) { |
2239 | if (Options::verboseCallLink()) |
            dataLog(" Clearing SABI because recursion was detected.\n");
2241 | m_shouldAlwaysBeInlined = false; |
2242 | return; |
2243 | } |
2244 | |
2245 | if (callerCodeBlock->capabilityLevelState() == DFG::CapabilityLevelNotSet) { |
        dataLog("In call from ", FullCodeOrigin(callerCodeBlock, callerFrame->codeOrigin()), " to ", *this, ": caller's DFG capability level is not set.\n");
2247 | CRASH(); |
2248 | } |
2249 | |
2250 | if (canCompile(callerCodeBlock->capabilityLevelState())) |
2251 | return; |
2252 | |
2253 | if (Options::verboseCallLink()) |
        dataLog(" Clearing SABI because the caller is not a DFG candidate.\n");
2255 | |
2256 | m_shouldAlwaysBeInlined = false; |
2257 | #endif |
2258 | } |
2259 | |
2260 | unsigned CodeBlock::reoptimizationRetryCounter() const |
2261 | { |
2262 | #if ENABLE(JIT) |
2263 | ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax()); |
2264 | return m_reoptimizationRetryCounter; |
2265 | #else |
2266 | return 0; |
2267 | #endif // ENABLE(JIT) |
2268 | } |
2269 | |
2270 | #if !ENABLE(C_LOOP) |
2271 | const RegisterAtOffsetList* CodeBlock::calleeSaveRegisters() const |
2272 | { |
2273 | #if ENABLE(JIT) |
2274 | if (auto* jitData = m_jitData.get()) { |
2275 | if (const RegisterAtOffsetList* registers = jitData->m_calleeSaveRegisters.get()) |
2276 | return registers; |
2277 | } |
2278 | #endif |
2279 | return &RegisterAtOffsetList::llintBaselineCalleeSaveRegisters(); |
}

static size_t roundCalleeSaveSpaceAsVirtualRegisters(size_t calleeSaveRegisters)
{
    return (WTF::roundUpToMultipleOf(sizeof(Register), calleeSaveRegisters * sizeof(CPURegister)) / sizeof(Register));
}
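
// A worked example of the rounding above, on a hypothetical 32-bit target: with sizeof(CPURegister) == 4
// and sizeof(Register) == 8, 3 callee-save registers occupy 12 bytes, which rounds up to 16 bytes, i.e.
// 2 virtual registers.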
2289 | |
2290 | size_t CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() |
2291 | { |
2292 | return roundCalleeSaveSpaceAsVirtualRegisters(numberOfLLIntBaselineCalleeSaveRegisters()); |
2293 | } |
2294 | |
2295 | size_t CodeBlock::calleeSaveSpaceAsVirtualRegisters() |
2296 | { |
2297 | return roundCalleeSaveSpaceAsVirtualRegisters(calleeSaveRegisters()->size()); |
2298 | } |
2299 | #endif |
2300 | |
2301 | #if ENABLE(JIT) |
2302 | |
2303 | void CodeBlock::countReoptimization() |
2304 | { |
2305 | m_reoptimizationRetryCounter++; |
2306 | if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax()) |
2307 | m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax(); |
2308 | } |
2309 | |
2310 | unsigned CodeBlock::numberOfDFGCompiles() |
2311 | { |
2312 | ASSERT(JITCode::isBaselineCode(jitType())); |
2313 | if (Options::testTheFTL()) { |
2314 | if (m_didFailFTLCompilation) |
2315 | return 1000000; |
2316 | return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter; |
2317 | } |
2318 | CodeBlock* replacement = this->replacement(); |
2319 | return ((replacement && JITCode::isOptimizingJIT(replacement->jitType())) ? 1 : 0) + m_reoptimizationRetryCounter; |
2320 | } |
2321 | |
2322 | int32_t CodeBlock::codeTypeThresholdMultiplier() const |
2323 | { |
2324 | if (codeType() == EvalCode) |
2325 | return Options::evalThresholdMultiplier(); |
2326 | |
2327 | return 1; |
2328 | } |
2329 | |
2330 | double CodeBlock::optimizationThresholdScalingFactor() |
2331 | { |
2332 | // This expression arises from doing a least-squares fit of |
2333 | // |
2334 | // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d |
2335 | // |
2336 | // against the data points: |
2337 | // |
2338 | // x F[x_] |
2339 | // 10 0.9 (smallest reasonable code block) |
2340 | // 200 1.0 (typical small-ish code block) |
2341 | // 320 1.2 (something I saw in 3d-cube that I wanted to optimize) |
2342 | // 1268 5.0 (something I saw in 3d-cube that I didn't want to optimize) |
2343 | // 4000 5.5 (random large size, used to cause the function to converge to a shallow curve of some sort) |
2344 | // 10000 6.0 (similar to above) |
2345 | // |
2346 | // I achieve the minimization using the following Mathematica code: |
2347 | // |
2348 | // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d |
2349 | // |
2350 | // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}} |
2351 | // |
2352 | // solution = |
2353 | // Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples), |
2354 | // {a, b, c, d}][[2]] |
2355 | // |
2356 | // And the code below (to initialize a, b, c, d) is generated by: |
2357 | // |
2358 | // Print["const double " <> ToString[#[[1]]] <> " = " <> |
2359 | // If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution |
2360 | // |
2361 | // We've long known the following to be true: |
2362 | // - Small code blocks are cheap to optimize and so we should do it sooner rather |
2363 | // than later. |
2364 | // - Large code blocks are expensive to optimize and so we should postpone doing so, |
2365 | // and sometimes have a large enough threshold that we never optimize them. |
2366 | // - The difference in cost is not totally linear because (a) just invoking the |
2367 | // DFG incurs some base cost and (b) for large code blocks there is enough slop |
2368 | // in the correlation between instruction count and the actual compilation cost |
2369 | // that for those large blocks, the instruction count should not have a strong |
2370 | // influence on our threshold. |
2371 | // |
2372 | // I knew the goals but I didn't know how to achieve them; so I picked an interesting |
2373 | // example where the heuristics were right (code block in 3d-cube with instruction |
2374 | // count 320, which got compiled early as it should have been) and one where they were |
2375 | // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive |
2376 | // to compile and didn't run often enough to warrant compilation in my opinion), and |
2377 | // then threw in additional data points that represented my own guess of what our |
2378 | // heuristics should do for some round-numbered examples. |
2379 | // |
2380 | // The expression to which I decided to fit the data arose because I started with an |
2381 | // affine function, and then did two things: put the linear part in an Abs to ensure |
2382 | // that the fit didn't end up choosing a negative value of c (which would result in |
2383 | // the function turning over and going negative for large x) and I threw in a Sqrt |
    // term because Sqrt represents my intuition that the function should be more sensitive
2385 | // to small changes in small values of x, but less sensitive when x gets large. |
2386 | |
2387 | // Note that the current fit essentially eliminates the linear portion of the |
2388 | // expression (c == 0.0). |
2389 | const double a = 0.061504; |
2390 | const double b = 1.02406; |
2391 | const double c = 0.0; |
2392 | const double d = 0.825914; |
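    // A rough worked example (illustrative only, not one of the fit points): a bytecodeCost of 200 gives
    // 0.825914 + 0.061504 * sqrt(201.02406), roughly 1.70, before the code-type multiplier below.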
2393 | |
2394 | double bytecodeCost = this->bytecodeCost(); |
2395 | |
2396 | ASSERT(bytecodeCost); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense. |
2397 | |
2398 | double result = d + a * sqrt(bytecodeCost + b) + c * bytecodeCost; |
2399 | |
2400 | result *= codeTypeThresholdMultiplier(); |
2401 | |
    if (Options::verboseOSR()) {
        dataLog(
            *this, ": bytecode cost is ", bytecodeCost,
            ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
            "\n");
    }
2408 | return result; |
2409 | } |
2410 | |
2411 | static int32_t clipThreshold(double threshold) |
2412 | { |
2413 | if (threshold < 1.0) |
2414 | return 1; |
2415 | |
2416 | if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max())) |
2417 | return std::numeric_limits<int32_t>::max(); |
2418 | |
2419 | return static_cast<int32_t>(threshold); |
2420 | } |
2421 | |
2422 | int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold) |
2423 | { |
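    // Illustrative example with made-up numbers: desiredThreshold = 1000, a scaling factor of 1.5, and
    // 2 prior reoptimizations give clipThreshold(1000 * 1.5 * (1 << 2)) = 6000.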
2424 | return clipThreshold( |
2425 | static_cast<double>(desiredThreshold) * |
2426 | optimizationThresholdScalingFactor() * |
2427 | (1 << reoptimizationRetryCounter())); |
2428 | } |
2429 | |
2430 | bool CodeBlock::checkIfOptimizationThresholdReached() |
2431 | { |
2432 | #if ENABLE(DFG_JIT) |
2433 | if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) { |
2434 | if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode)) |
2435 | == DFG::Worklist::Compiled) { |
2436 | optimizeNextInvocation(); |
2437 | return true; |
2438 | } |
2439 | } |
2440 | #endif |
2441 | |
2442 | return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this); |
2443 | } |
2444 | |
2445 | #if ENABLE(DFG_JIT) |
2446 | auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction |
2447 | { |
2448 | DFG::OSRExitBase& exit = exitState.exit; |
2449 | if (!exitKindMayJettison(exit.m_kind)) { |
2450 | // FIXME: We may want to notice that we're frequently exiting |
2451 | // at an op_catch that we didn't compile an entrypoint for, and |
2452 | // then trigger a reoptimization of this CodeBlock: |
2453 | // https://bugs.webkit.org/show_bug.cgi?id=175842 |
2454 | return OptimizeAction::None; |
2455 | } |
2456 | |
2457 | exit.m_count++; |
2458 | m_osrExitCounter++; |
2459 | |
2460 | CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock; |
2461 | ASSERT(baselineCodeBlock == baselineAlternative()); |
2462 | if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold())) |
2463 | return OptimizeAction::ReoptimizeNow; |
2464 | |
2465 | // We want to figure out if there's a possibility that we're in a loop. For the outermost |
2466 | // code block in the inline stack, we handle this appropriately by having the loop OSR trigger |
2467 | // check the exit count of the replacement of the CodeBlock from which we are OSRing. The |
2468 | // problem is the inlined functions, which might also have loops, but whose baseline versions |
2469 | // don't know where to look for the exit count. Figure out if those loops are severe enough |
2470 | // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger. |
2471 | // Otherwise, we should use the normal reoptimization trigger. |
2472 | |
2473 | bool didTryToEnterInLoop = false; |
2474 | for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame(); inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame()) { |
2475 | if (inlineCallFrame->baselineCodeBlock->ownerExecutable()->didTryToEnterInLoop()) { |
2476 | didTryToEnterInLoop = true; |
2477 | break; |
2478 | } |
2479 | } |
2480 | |
2481 | uint32_t exitCountThreshold = didTryToEnterInLoop |
2482 | ? exitCountThresholdForReoptimizationFromLoop() |
2483 | : exitCountThresholdForReoptimization(); |
2484 | |
2485 | if (m_osrExitCounter > exitCountThreshold) |
2486 | return OptimizeAction::ReoptimizeNow; |
2487 | |
    // Too few failures. Adjust the execution counter so that we will only consider optimizing again after a while.
2489 | baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold); |
2490 | return OptimizeAction::None; |
2491 | } |
2492 | #endif |
2493 | |
2494 | void CodeBlock::optimizeNextInvocation() |
2495 | { |
2496 | if (Options::verboseOSR()) |
        dataLog(*this, ": Optimizing next invocation.\n");
2498 | m_jitExecuteCounter.setNewThreshold(0, this); |
2499 | } |
2500 | |
2501 | void CodeBlock::dontOptimizeAnytimeSoon() |
2502 | { |
2503 | if (Options::verboseOSR()) |
        dataLog(*this, ": Not optimizing anytime soon.\n");
2505 | m_jitExecuteCounter.deferIndefinitely(); |
2506 | } |
2507 | |
2508 | void CodeBlock::optimizeAfterWarmUp() |
2509 | { |
2510 | if (Options::verboseOSR()) |
        dataLog(*this, ": Optimizing after warm-up.\n");
2512 | #if ENABLE(DFG_JIT) |
2513 | m_jitExecuteCounter.setNewThreshold( |
2514 | adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this); |
2515 | #endif |
2516 | } |
2517 | |
2518 | void CodeBlock::optimizeAfterLongWarmUp() |
2519 | { |
2520 | if (Options::verboseOSR()) |
        dataLog(*this, ": Optimizing after long warm-up.\n");
2522 | #if ENABLE(DFG_JIT) |
2523 | m_jitExecuteCounter.setNewThreshold( |
2524 | adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this); |
2525 | #endif |
2526 | } |
2527 | |
2528 | void CodeBlock::optimizeSoon() |
2529 | { |
2530 | if (Options::verboseOSR()) |
        dataLog(*this, ": Optimizing soon.\n");
2532 | #if ENABLE(DFG_JIT) |
2533 | m_jitExecuteCounter.setNewThreshold( |
2534 | adjustedCounterValue(Options::thresholdForOptimizeSoon()), this); |
2535 | #endif |
2536 | } |
2537 | |
2538 | void CodeBlock::forceOptimizationSlowPathConcurrently() |
2539 | { |
2540 | if (Options::verboseOSR()) |
        dataLog(*this, ": Forcing slow path concurrently.\n");
2542 | m_jitExecuteCounter.forceSlowPathConcurrently(); |
2543 | } |
2544 | |
2545 | #if ENABLE(DFG_JIT) |
2546 | void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result) |
2547 | { |
2548 | JITType type = jitType(); |
2549 | if (type != JITType::BaselineJIT) { |
        dataLog(*this, ": expected to have baseline code but have ", type, "\n");
2551 | CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), static_cast<uint8_t>(type)); |
2552 | } |
2553 | |
2554 | CodeBlock* replacement = this->replacement(); |
2555 | bool hasReplacement = (replacement && replacement != this); |
2556 | if ((result == CompilationSuccessful) != hasReplacement) { |
        dataLog(*this, ": we have result = ", result, " but ");
        if (replacement == this)
            dataLog("we are our own replacement.\n");
        else
            dataLog("our replacement is ", pointerDump(replacement), "\n");
2562 | RELEASE_ASSERT_NOT_REACHED(); |
2563 | } |
2564 | |
2565 | switch (result) { |
2566 | case CompilationSuccessful: |
2567 | RELEASE_ASSERT(replacement && JITCode::isOptimizingJIT(replacement->jitType())); |
2568 | optimizeNextInvocation(); |
2569 | return; |
2570 | case CompilationFailed: |
2571 | dontOptimizeAnytimeSoon(); |
2572 | return; |
2573 | case CompilationDeferred: |
2574 | // We'd like to do dontOptimizeAnytimeSoon() but we cannot because |
2575 | // forceOptimizationSlowPathConcurrently() is inherently racy. It won't |
2576 | // necessarily guarantee anything. So, we make sure that even if that |
2577 | // function ends up being a no-op, we still eventually retry and realize |
2578 | // that we have optimized code ready. |
2579 | optimizeAfterWarmUp(); |
2580 | return; |
2581 | case CompilationInvalidated: |
2582 | // Retry with exponential backoff. |
2583 | countReoptimization(); |
2584 | optimizeAfterWarmUp(); |
2585 | return; |
2586 | } |
2587 | |
    dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
2589 | RELEASE_ASSERT_NOT_REACHED(); |
2590 | } |
2591 | |
2592 | #endif |
2593 | |
2594 | uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold) |
2595 | { |
2596 | ASSERT(JITCode::isOptimizingJIT(jitType())); |
2597 | // Compute this the lame way so we don't saturate. This is called infrequently |
2598 | // enough that this loop won't hurt us. |
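    // E.g. a desiredThreshold of 100 with a baseline reoptimization retry counter of 3 yields
    // 100 << 3 = 800; if the repeated doubling ever overflows, the result clamps to UINT32_MAX.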
2599 | unsigned result = desiredThreshold; |
2600 | for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) { |
2601 | unsigned newResult = result << 1; |
2602 | if (newResult < result) |
2603 | return std::numeric_limits<uint32_t>::max(); |
2604 | result = newResult; |
2605 | } |
2606 | return result; |
2607 | } |
2608 | |
2609 | uint32_t CodeBlock::exitCountThresholdForReoptimization() |
2610 | { |
2611 | return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier()); |
2612 | } |
2613 | |
2614 | uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop() |
2615 | { |
2616 | return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier()); |
2617 | } |
2618 | |
2619 | bool CodeBlock::shouldReoptimizeNow() |
2620 | { |
2621 | return osrExitCounter() >= exitCountThresholdForReoptimization(); |
2622 | } |
2623 | |
2624 | bool CodeBlock::shouldReoptimizeFromLoopNow() |
2625 | { |
2626 | return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop(); |
2627 | } |
2628 | #endif |
2629 | |
2630 | ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, BytecodeIndex bytecodeIndex) |
2631 | { |
2632 | auto instruction = instructions().at(bytecodeIndex); |
2633 | switch (instruction->opcodeID()) { |
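    // CASE1 covers opcodes whose metadata embeds an ArrayProfile directly; CASE2 covers opcodes whose
    // ArrayProfile lives inside the LLInt call link info in their metadata.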
2634 | #define CASE1(Op) \ |
2635 | case Op::opcodeID: \ |
2636 | return &instruction->as<Op>().metadata(this).m_arrayProfile; |
2637 | |
2638 | #define CASE2(Op) \ |
2639 | case Op::opcodeID: \ |
2640 | return &instruction->as<Op>().metadata(this).m_callLinkInfo.m_arrayProfile; |
2641 | |
2642 | FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE1) |
2643 | FOR_EACH_OPCODE_WITH_LLINT_CALL_LINK_INFO(CASE2) |
2644 | |
2645 | #undef CASE1 |
2646 | #undef CASE2 |
2647 | |
2648 | case OpGetById::opcodeID: { |
2649 | auto bytecode = instruction->as<OpGetById>(); |
2650 | auto& metadata = bytecode.metadata(this); |
2651 | if (metadata.m_modeMetadata.mode == GetByIdMode::ArrayLength) |
2652 | return &metadata.m_modeMetadata.arrayLengthMode.arrayProfile; |
2653 | break; |
2654 | } |
2655 | default: |
2656 | break; |
2657 | } |
2658 | |
2659 | return nullptr; |
2660 | } |
2661 | |
2662 | ArrayProfile* CodeBlock::getArrayProfile(BytecodeIndex bytecodeIndex) |
2663 | { |
2664 | ConcurrentJSLocker locker(m_lock); |
2665 | return getArrayProfile(locker, bytecodeIndex); |
2666 | } |
2667 | |
2668 | #if ENABLE(DFG_JIT) |
2669 | Vector<CodeOrigin, 0, UnsafeVectorOverflow>& CodeBlock::codeOrigins() |
2670 | { |
2671 | return m_jitCode->dfgCommon()->codeOrigins; |
2672 | } |
2673 | |
2674 | size_t CodeBlock::numberOfDFGIdentifiers() const |
2675 | { |
2676 | if (!JITCode::isOptimizingJIT(jitType())) |
2677 | return 0; |
2678 | |
2679 | return m_jitCode->dfgCommon()->dfgIdentifiers.size(); |
2680 | } |
2681 | |
2682 | const Identifier& CodeBlock::identifier(int index) const |
2683 | { |
2684 | size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers(); |
2685 | if (static_cast<unsigned>(index) < unlinkedIdentifiers) |
2686 | return m_unlinkedCode->identifier(index); |
2687 | ASSERT(JITCode::isOptimizingJIT(jitType())); |
2688 | return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers]; |
2689 | } |
2690 | #endif // ENABLE(DFG_JIT) |
2691 | |
2692 | void CodeBlock::updateAllValueProfilePredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles) |
2693 | { |
2694 | ConcurrentJSLocker locker(m_lock); |
2695 | |
2696 | numberOfLiveNonArgumentValueProfiles = 0; |
2697 | numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full. |
2698 | |
2699 | forEachValueProfile([&](ValueProfile& profile, bool isArgument) { |
2700 | unsigned numSamples = profile.totalNumberOfSamples(); |
2701 | static_assert(ValueProfile::numberOfBuckets == 1); |
2702 | if (numSamples > ValueProfile::numberOfBuckets) |
2703 | numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight. |
2704 | numberOfSamplesInProfiles += numSamples; |
2705 | if (isArgument) { |
2706 | profile.computeUpdatedPrediction(locker); |
2707 | return; |
2708 | } |
2709 | if (profile.numberOfSamples() || profile.isSampledBefore()) |
2710 | numberOfLiveNonArgumentValueProfiles++; |
2711 | profile.computeUpdatedPrediction(locker); |
2712 | }); |
2713 | |
2714 | if (auto* rareData = m_rareData.get()) { |
2715 | for (auto& profileBucket : rareData->m_catchProfiles) { |
2716 | profileBucket->forEach([&] (ValueProfileAndOperand& profile) { |
2717 | profile.computeUpdatedPrediction(locker); |
2718 | }); |
2719 | } |
2720 | } |
2721 | |
2722 | #if ENABLE(DFG_JIT) |
2723 | lazyOperandValueProfiles(locker).computeUpdatedPredictions(locker); |
2724 | #endif |
2725 | } |
2726 | |
2727 | void CodeBlock::updateAllValueProfilePredictions() |
2728 | { |
2729 | unsigned ignoredValue1, ignoredValue2; |
2730 | updateAllValueProfilePredictionsAndCountLiveness(ignoredValue1, ignoredValue2); |
2731 | } |
2732 | |
2733 | void CodeBlock::updateAllArrayPredictions() |
2734 | { |
2735 | ConcurrentJSLocker locker(m_lock); |
2736 | |
2737 | forEachArrayProfile([&](ArrayProfile& profile) { |
2738 | profile.computeUpdatedPrediction(locker, this); |
2739 | }); |
2740 | |
2741 | forEachArrayAllocationProfile([&](ArrayAllocationProfile& profile) { |
2742 | profile.updateProfile(); |
2743 | }); |
2744 | } |
2745 | |
2746 | void CodeBlock::updateAllPredictions() |
2747 | { |
2748 | updateAllValueProfilePredictions(); |
2749 | updateAllArrayPredictions(); |
2750 | } |
2751 | |
2752 | bool CodeBlock::shouldOptimizeNow() |
2753 | { |
2754 | if (Options::verboseOSR()) |
        dataLog("Considering optimizing ", *this, "...\n");
2756 | |
2757 | if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay()) |
2758 | return true; |
2759 | |
2760 | updateAllArrayPredictions(); |
2761 | |
2762 | unsigned numberOfLiveNonArgumentValueProfiles; |
2763 | unsigned numberOfSamplesInProfiles; |
2764 | updateAllValueProfilePredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles); |
2765 | |
2766 | if (Options::verboseOSR()) { |
2767 | dataLogF( |
2768 | "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n" , |
2769 | (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles(), |
2770 | numberOfLiveNonArgumentValueProfiles, numberOfNonArgumentValueProfiles(), |
2771 | (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfNonArgumentValueProfiles(), |
2772 | numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfNonArgumentValueProfiles()); |
2773 | } |
2774 | |
2775 | if ((!numberOfNonArgumentValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles() >= Options::desiredProfileLivenessRate()) |
2776 | && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate()) |
2777 | && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay()) |
2778 | return true; |
2779 | |
2780 | ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max()); |
2781 | m_optimizationDelayCounter++; |
2782 | optimizeAfterWarmUp(); |
2783 | return false; |
2784 | } |
2785 | |
2786 | #if ENABLE(DFG_JIT) |
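// Feeds each OSR exit taken by this optimized block back into the baseline alternative's
// exit-site profile, so that a future optimizing compile can avoid the speculations that
// actually failed here.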
2787 | void CodeBlock::tallyFrequentExitSites() |
2788 | { |
2789 | ASSERT(JITCode::isOptimizingJIT(jitType())); |
2790 | ASSERT(alternative()->jitType() == JITType::BaselineJIT); |
2791 | |
2792 | CodeBlock* profiledBlock = alternative(); |
2793 | |
2794 | switch (jitType()) { |
2795 | case JITType::DFGJIT: { |
2796 | DFG::JITCode* jitCode = m_jitCode->dfg(); |
2797 | for (auto& exit : jitCode->osrExit) |
2798 | exit.considerAddingAsFrequentExitSite(profiledBlock); |
2799 | break; |
2800 | } |
2801 | |
2802 | #if ENABLE(FTL_JIT) |
2803 | case JITType::FTLJIT: { |
2804 | // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit |
        // vector contains a totally different type that just so happens to behave like
2806 | // DFG::JITCode::osrExit. |
2807 | FTL::JITCode* jitCode = m_jitCode->ftl(); |
2808 | for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) { |
2809 | FTL::OSRExit& exit = jitCode->osrExit[i]; |
2810 | exit.considerAddingAsFrequentExitSite(profiledBlock); |
2811 | } |
2812 | break; |
2813 | } |
2814 | #endif |
2815 | |
2816 | default: |
2817 | RELEASE_ASSERT_NOT_REACHED(); |
2818 | break; |
2819 | } |
2820 | } |
2821 | #endif // ENABLE(DFG_JIT) |
2822 | |
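// Called when the global lexical environment changes. Any op_resolve_scope that was linked
// as a GlobalProperty access may now be shadowed by a global lexical binding: shadowed
// sites get a zero binding epoch (forcing re-resolution on the slow path), while the rest
// are stamped with the current epoch.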
2823 | void CodeBlock::notifyLexicalBindingUpdate() |
2824 | { |
    // FIXME: Currently, module code does not query the JSGlobalLexicalEnvironment, so this
    // early return should be removed once that is fixed.
2826 | // https://bugs.webkit.org/show_bug.cgi?id=193347 |
2827 | if (scriptMode() == JSParserScriptMode::Module) |
2828 | return; |
2829 | JSGlobalObject* globalObject = m_globalObject.get(); |
2830 | JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(globalObject->globalScope()); |
2831 | SymbolTable* symbolTable = globalLexicalEnvironment->symbolTable(); |
2832 | |
2833 | ConcurrentJSLocker locker(m_lock); |
2834 | |
2835 | auto isShadowed = [&] (UniquedStringImpl* uid) { |
2836 | ConcurrentJSLocker locker(symbolTable->m_lock); |
2837 | return symbolTable->contains(locker, uid); |
2838 | }; |
2839 | |
2840 | const InstructionStream& instructionStream = instructions(); |
2841 | for (const auto& instruction : instructionStream) { |
2842 | OpcodeID opcodeID = instruction->opcodeID(); |
2843 | switch (opcodeID) { |
2844 | case op_resolve_scope: { |
2845 | auto bytecode = instruction->as<OpResolveScope>(); |
2846 | auto& metadata = bytecode.metadata(this); |
2847 | ResolveType originalResolveType = metadata.m_resolveType; |
2848 | if (originalResolveType == GlobalProperty || originalResolveType == GlobalPropertyWithVarInjectionChecks) { |
2849 | const Identifier& ident = identifier(bytecode.m_var); |
2850 | if (isShadowed(ident.impl())) |
2851 | metadata.m_globalLexicalBindingEpoch = 0; |
2852 | else |
2853 | metadata.m_globalLexicalBindingEpoch = globalObject->globalLexicalBindingEpoch(); |
2854 | } |
2855 | break; |
2856 | } |
2857 | default: |
2858 | break; |
2859 | } |
2860 | } |
2861 | } |
2862 | |
2863 | #if ENABLE(VERBOSE_VALUE_PROFILE) |
2864 | void CodeBlock::dumpValueProfiles() |
2865 | { |
2866 | dataLog("ValueProfile for " , *this, ":\n" ); |
2867 | forEachValueProfile([](ValueProfile& profile, bool isArgument) { |
        if (isArgument)
            dataLogF("   arg: ");
        else
            dataLogF("   bc: ");
        if (!profile.numberOfSamples() && profile.m_prediction == SpecNone) {
            dataLogF("<empty>\n");
            return; // `continue` is ill-formed inside the lambda; return moves on to the next profile.
        }
        profile.dump(WTF::dataFile());
        dataLogF("\n");
2878 | }); |
2879 | dataLog("RareCaseProfile for " , *this, ":\n" ); |
2880 | if (auto* jitData = m_jitData.get()) { |
2881 | for (RareCaseProfile* profile : jitData->m_rareCaseProfiles) |
2882 | dataLogF(" bc = %d: %u\n" , profile->m_bytecodeOffset, profile->m_counter); |
2883 | } |
2884 | } |
2885 | #endif // ENABLE(VERBOSE_VALUE_PROFILE) |
2886 | |
2887 | unsigned CodeBlock::frameRegisterCount() |
2888 | { |
2889 | switch (jitType()) { |
2890 | case JITType::InterpreterThunk: |
2891 | return LLInt::frameRegisterCountFor(this); |
2892 | |
2893 | #if ENABLE(JIT) |
2894 | case JITType::BaselineJIT: |
2895 | return JIT::frameRegisterCountFor(this); |
2896 | #endif // ENABLE(JIT) |
2897 | |
2898 | #if ENABLE(DFG_JIT) |
2899 | case JITType::DFGJIT: |
2900 | case JITType::FTLJIT: |
2901 | return jitCode()->dfgCommon()->frameRegisterCount; |
2902 | #endif // ENABLE(DFG_JIT) |
2903 | |
2904 | default: |
2905 | RELEASE_ASSERT_NOT_REACHED(); |
2906 | return 0; |
2907 | } |
2908 | } |
2909 | |
2910 | int CodeBlock::stackPointerOffset() |
2911 | { |
2912 | return virtualRegisterForLocal(frameRegisterCount() - 1).offset(); |
2913 | } |
2914 | |
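// Estimates the baseline machine code size for this block as bytecodeCost() scaled by the
// historically observed ratio of machine-code bytes per bytecode word. As an illustrative
// example (numbers made up): with a mean ratio of 10, a standard deviation of 2, and a
// bytecode cost of 500, the prediction would be (10 + 2) * 500 = 6000 bytes.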
2915 | size_t CodeBlock::predictedMachineCodeSize() |
2916 | { |
2917 | VM* vm = m_vm; |
2918 | // This will be called from CodeBlock::CodeBlock before either m_vm or the |
2919 | // instructions have been initialized. It's OK to return 0 because what will really |
2920 | // matter is the recomputation of this value when the slow path is triggered. |
2921 | if (!vm) |
2922 | return 0; |
2923 | |
2924 | if (!*vm->machineCodeBytesPerBytecodeWordForBaselineJIT) |
        return 0; // It's as good a prediction as we'll get.
2926 | |
    // Be conservative: use mean + one standard deviation, so the size is an overestimation about 84% of the time (assuming a normal distribution).
2928 | double multiplier = vm->machineCodeBytesPerBytecodeWordForBaselineJIT->mean() + |
2929 | vm->machineCodeBytesPerBytecodeWordForBaselineJIT->standardDeviation(); |
2930 | |
    // Be paranoid: silently reject bogus multipliers. Silently doing the "wrong" thing
    // here is OK, since this whole method is just a heuristic.
2933 | if (multiplier < 0 || multiplier > 1000) |
2934 | return 0; |
2935 | |
2936 | double doubleResult = multiplier * bytecodeCost(); |
2937 | |
2938 | // Be even more paranoid: silently reject values that won't fit into a size_t. If |
2939 | // the function is so huge that we can't even fit it into virtual memory then we |
2940 | // should probably have some other guards in place to prevent us from even getting |
2941 | // to this point. |
2942 | if (doubleResult > std::numeric_limits<size_t>::max()) |
2943 | return 0; |
2944 | |
2945 | return static_cast<size_t>(doubleResult); |
2946 | } |
2947 | |
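// Best-effort reverse mapping from a virtual register to a source-level name, for use in
// debugging dumps: scan any symbol tables among the constant registers, then fall back to
// "this" or a synthesized argument name, else the empty string.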
2948 | String CodeBlock::nameForRegister(VirtualRegister virtualRegister) |
2949 | { |
2950 | for (auto& constantRegister : m_constantRegisters) { |
2951 | if (constantRegister.get().isEmpty()) |
2952 | continue; |
2953 | if (SymbolTable* symbolTable = jsDynamicCast<SymbolTable*>(vm(), constantRegister.get())) { |
2954 | ConcurrentJSLocker locker(symbolTable->m_lock); |
2955 | auto end = symbolTable->end(locker); |
2956 | for (auto ptr = symbolTable->begin(locker); ptr != end; ++ptr) { |
2957 | if (ptr->value.varOffset() == VarOffset(virtualRegister)) { |
2958 | // FIXME: This won't work from the compilation thread. |
2959 | // https://bugs.webkit.org/show_bug.cgi?id=115300 |
2960 | return ptr->key.get(); |
2961 | } |
2962 | } |
2963 | } |
2964 | } |
2965 | if (virtualRegister == thisRegister()) |
2966 | return "this"_s ; |
2967 | if (virtualRegister.isArgument()) |
2968 | return makeString("arguments[" , pad(' ', 3, virtualRegister.toArgument()), ']'); |
2969 | |
2970 | return emptyString(); |
2971 | } |
2972 | |
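// Value profiles live in the metadata of the opcodes that collect them.
// FOR_EACH_OPCODE_WITH_VALUE_PROFILE expands to one case per such opcode; every other
// opcode has no value profile, so we answer null for it.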
2973 | ValueProfile* CodeBlock::tryGetValueProfileForBytecodeIndex(BytecodeIndex bytecodeIndex) |
2974 | { |
2975 | auto instruction = instructions().at(bytecodeIndex); |
2976 | switch (instruction->opcodeID()) { |
2977 | |
2978 | #define CASE(Op) \ |
2979 | case Op::opcodeID: \ |
2980 | return &instruction->as<Op>().metadata(this).m_profile; |
2981 | |
2982 | FOR_EACH_OPCODE_WITH_VALUE_PROFILE(CASE) |
2983 | |
2984 | #undef CASE |
2985 | |
2986 | default: |
2987 | return nullptr; |
2988 | |
2989 | } |
2990 | } |
2991 | |
2992 | SpeculatedType CodeBlock::valueProfilePredictionForBytecodeIndex(const ConcurrentJSLocker& locker, BytecodeIndex bytecodeIndex) |
2993 | { |
2994 | if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeIndex(bytecodeIndex)) |
2995 | return valueProfile->computeUpdatedPrediction(locker); |
2996 | return SpecNone; |
2997 | } |
2998 | |
2999 | ValueProfile& CodeBlock::valueProfileForBytecodeIndex(BytecodeIndex bytecodeIndex) |
3000 | { |
3001 | return *tryGetValueProfileForBytecodeIndex(bytecodeIndex); |
3002 | } |
3003 | |
3004 | void CodeBlock::validate() |
3005 | { |
    BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't affect CodeBlock footprint.
3007 | |
3008 | FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeIndex(this, BytecodeIndex(0)); |
3009 | |
3010 | if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeLocals)) { |
3011 | beginValidationDidFail(); |
3012 | dataLog(" Wrong number of bits in result!\n" ); |
3013 | dataLog(" Result: " , liveAtHead, "\n" ); |
3014 | dataLog(" Bit count: " , liveAtHead.numBits(), "\n" ); |
3015 | endValidationDidFail(); |
3016 | } |
3017 | |
3018 | for (unsigned i = m_numCalleeLocals; i--;) { |
3019 | VirtualRegister reg = virtualRegisterForLocal(i); |
3020 | |
3021 | if (liveAtHead[i]) { |
3022 | beginValidationDidFail(); |
3023 | dataLog(" Variable " , reg, " is expected to be dead.\n" ); |
3024 | dataLog(" Result: " , liveAtHead, "\n" ); |
3025 | endValidationDidFail(); |
3026 | } |
3027 | } |
3028 | |
3029 | const InstructionStream& instructionStream = instructions(); |
3030 | for (const auto& instruction : instructionStream) { |
3031 | OpcodeID opcode = instruction->opcodeID(); |
3032 | if (!!baselineAlternative()->handlerForBytecodeIndex(BytecodeIndex(instruction.offset()))) { |
3033 | if (opcode == op_catch || opcode == op_enter) { |
                // op_catch/op_enter logically represent an entrypoint. Entrypoints are not allowed to be
                // inside of a try block because they are responsible for bootstrapping state, and for that
                // reason they are never allowed to throw an exception. We rely on this when compiling in
                // the DFG. Because an entrypoint never throws, the bytecode generator should never emit
                // one inside a try block.
3039 | beginValidationDidFail(); |
3040 | dataLog(" entrypoint not allowed inside a try block." ); |
3041 | endValidationDidFail(); |
3042 | } |
3043 | } |
3044 | } |
3045 | } |
3046 | |
3047 | void CodeBlock::beginValidationDidFail() |
3048 | { |
3049 | dataLog("Validation failure in " , *this, ":\n" ); |
3050 | dataLog("\n" ); |
3051 | } |
3052 | |
3053 | void CodeBlock::endValidationDidFail() |
3054 | { |
3055 | dataLog("\n" ); |
3056 | dumpBytecode(); |
3057 | dataLog("\n" ); |
3058 | dataLog("Validation failure.\n" ); |
3059 | RELEASE_ASSERT_NOT_REACHED(); |
3060 | } |
3061 | |
3062 | void CodeBlock::addBreakpoint(unsigned numBreakpoints) |
3063 | { |
3064 | m_numBreakpoints += numBreakpoints; |
3065 | ASSERT(m_numBreakpoints); |
3066 | if (JITCode::isOptimizingJIT(jitType())) |
3067 | jettison(Profiler::JettisonDueToDebuggerBreakpoint); |
3068 | } |
3069 | |
3070 | void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode) |
3071 | { |
3072 | m_steppingMode = mode; |
3073 | if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType())) |
3074 | jettison(Profiler::JettisonDueToDebuggerStepping); |
3075 | } |
3076 | |
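// Jumps whose displacement does not fit in the instruction's narrow encoding store their
// real offset in a side table on the unlinked code, keyed by the jump's bytecode offset.
// The two helpers below look up that offset and compute the target instruction from it.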
3077 | int CodeBlock::outOfLineJumpOffset(const Instruction* pc) |
3078 | { |
3079 | int offset = bytecodeOffset(pc); |
3080 | return m_unlinkedCode->outOfLineJumpOffset(offset); |
3081 | } |
3082 | |
3083 | const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc) |
3084 | { |
3085 | int offset = bytecodeOffset(pc); |
3086 | int target = m_unlinkedCode->outOfLineJumpOffset(offset); |
3087 | return instructions().at(offset + target).ptr(); |
3088 | } |
3089 | |
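// Arith profiles record the operand and result types observed for arithmetic bytecodes;
// the optimizing JITs consult them when choosing between integer, double, and generic code
// paths. The lookups below map a bytecode index or instruction pointer to the profile
// embedded in that opcode's metadata, answering null for opcodes that carry no profile.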
3090 | BinaryArithProfile* CodeBlock::binaryArithProfileForBytecodeIndex(BytecodeIndex bytecodeIndex) |
3091 | { |
3092 | return binaryArithProfileForPC(instructions().at(bytecodeIndex.offset()).ptr()); |
3093 | } |
3094 | |
3095 | UnaryArithProfile* CodeBlock::unaryArithProfileForBytecodeIndex(BytecodeIndex bytecodeIndex) |
3096 | { |
3097 | return unaryArithProfileForPC(instructions().at(bytecodeIndex.offset()).ptr()); |
3098 | } |
3099 | |
3100 | BinaryArithProfile* CodeBlock::binaryArithProfileForPC(const Instruction* pc) |
3101 | { |
3102 | switch (pc->opcodeID()) { |
3103 | case op_add: |
3104 | return &pc->as<OpAdd>().metadata(this).m_arithProfile; |
3105 | case op_mul: |
3106 | return &pc->as<OpMul>().metadata(this).m_arithProfile; |
3107 | case op_sub: |
3108 | return &pc->as<OpSub>().metadata(this).m_arithProfile; |
3109 | case op_div: |
3110 | return &pc->as<OpDiv>().metadata(this).m_arithProfile; |
3111 | default: |
3112 | break; |
3113 | } |
3114 | |
3115 | return nullptr; |
3116 | } |
3117 | |
3118 | UnaryArithProfile* CodeBlock::unaryArithProfileForPC(const Instruction* pc) |
3119 | { |
3120 | switch (pc->opcodeID()) { |
3121 | case op_negate: |
3122 | return &pc->as<OpNegate>().metadata(this).m_arithProfile; |
3123 | case op_inc: |
3124 | return &pc->as<OpInc>().metadata(this).m_arithProfile; |
3125 | case op_dec: |
3126 | return &pc->as<OpDec>().metadata(this).m_arithProfile; |
3127 | default: |
3128 | break; |
3129 | } |
3130 | |
3131 | return nullptr; |
3132 | } |
3133 | |
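// Answers whether baseline profiling observed this arithmetic op taking its special fast
// path. Without baseline profiling data there is nothing to consult, so the answer is
// conservatively false.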
3134 | bool CodeBlock::couldTakeSpecialArithFastCase(BytecodeIndex bytecodeIndex) |
3135 | { |
3136 | if (!hasBaselineJITProfiling()) |
3137 | return false; |
3138 | BinaryArithProfile* profile = binaryArithProfileForBytecodeIndex(bytecodeIndex); |
3139 | if (!profile) |
3140 | return false; |
3141 | return profile->tookSpecialFastPath(); |
3142 | } |
3143 | |
3144 | #if ENABLE(JIT) |
3145 | DFG::CapabilityLevel CodeBlock::capabilityLevel() |
3146 | { |
3147 | DFG::CapabilityLevel result = computeCapabilityLevel(); |
3148 | m_capabilityLevelState = result; |
3149 | return result; |
3150 | } |
3151 | #endif |
3152 | |
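// The control flow profiler tracks which textual ranges of the source have executed.
// op_profile_control_flow is emitted at the start of every basic block, so pairing each
// one with the start of the next yields the text range a block covers; each range (minus
// any nested function literals) is then registered with the profiler.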
3153 | void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler() |
3154 | { |
3155 | if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets()) |
3156 | return; |
3157 | const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets(); |
3158 | for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) { |
3159 | // Because op_profile_control_flow is emitted at the beginning of every basic block, finding |
3160 | // the next op_profile_control_flow will give us the text range of a single basic block. |
3161 | size_t startIdx = bytecodeOffsets[i]; |
3162 | auto instruction = instructions().at(startIdx); |
3163 | RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow); |
3164 | auto bytecode = instruction->as<OpProfileControlFlow>(); |
3165 | auto& metadata = bytecode.metadata(this); |
3166 | int basicBlockStartOffset = bytecode.m_textOffset; |
3167 | int basicBlockEndOffset; |
3168 | if (i + 1 < offsetsLength) { |
3169 | size_t endIdx = bytecodeOffsets[i + 1]; |
3170 | auto endInstruction = instructions().at(endIdx); |
3171 | RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow); |
3172 | basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().m_textOffset - 1; |
3173 | } else { |
3174 | basicBlockEndOffset = sourceOffset() + ownerExecutable()->source().length() - 1; // Offset before the closing brace. |
            basicBlockStartOffset = std::min(basicBlockStartOffset, basicBlockEndOffset); // Some start offsets may be at the closing brace; clamp so the start never exceeds the end.
3176 | } |
3177 | |
3178 | // The following check allows for the same textual JavaScript basic block to have its bytecode emitted more |
3179 | // than once and still play nice with the control flow profiler. When basicBlockStartOffset is larger than |
3180 | // basicBlockEndOffset, it indicates that the bytecode generator has emitted code for the same AST node |
3181 | // more than once (for example: ForInNode, Finally blocks in TryNode, etc). Though these are different |
3182 | // basic blocks at the bytecode level, they are generated from the same textual basic block in the JavaScript |
3183 | // program. The condition: |
3184 | // (basicBlockEndOffset < basicBlockStartOffset) |
3185 | // is encountered when op_profile_control_flow lies across the boundary of these duplicated bytecode basic |
3186 | // blocks and the textual offset goes from the end of the duplicated block back to the beginning. These |
3187 | // ranges are dummy ranges and are ignored. The duplicated bytecode basic blocks point to the same |
3188 | // internal data structure, so if any of them execute, it will record the same textual basic block in the |
3189 | // JavaScript program as executing. |
3190 | // At the bytecode level, this situation looks like: |
3191 | // j: op_profile_control_flow (from j->k, we have basicBlockEndOffset < basicBlockStartOffset) |
3192 | // ... |
3193 | // k: op_profile_control_flow (we want to skip over the j->k block and start fresh at offset k as the start of a new basic block k->m). |
3194 | // ... |
3195 | // m: op_profile_control_flow |
3196 | if (basicBlockEndOffset < basicBlockStartOffset) { |
3197 | RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock. |
3198 | metadata.m_basicBlockLocation = vm().controlFlowProfiler()->dummyBasicBlock(); |
3199 | continue; |
3200 | } |
3201 | |
3202 | BasicBlockLocation* basicBlockLocation = vm().controlFlowProfiler()->getBasicBlockLocation(ownerExecutable()->sourceID(), basicBlockStartOffset, basicBlockEndOffset); |
3203 | |
3204 | // Find all functions that are enclosed within the range: [basicBlockStartOffset, basicBlockEndOffset] |
3205 | // and insert these functions' start/end offsets as gaps in the current BasicBlockLocation. |
3206 | // This is necessary because in the original source text of a JavaScript program, |
        // function literals form new basic block boundaries, but they aren't represented
3208 | // inside the CodeBlock's instruction stream. |
3209 | auto insertFunctionGaps = [basicBlockLocation, basicBlockStartOffset, basicBlockEndOffset] (const WriteBarrier<FunctionExecutable>& functionExecutable) { |
3210 | const UnlinkedFunctionExecutable* executable = functionExecutable->unlinkedExecutable(); |
3211 | int functionStart = executable->typeProfilingStartOffset(); |
3212 | int functionEnd = executable->typeProfilingEndOffset(); |
3213 | if (functionStart >= basicBlockStartOffset && functionEnd <= basicBlockEndOffset) |
3214 | basicBlockLocation->insertGap(functionStart, functionEnd); |
3215 | }; |
3216 | |
3217 | for (const WriteBarrier<FunctionExecutable>& executable : m_functionDecls) |
3218 | insertFunctionGaps(executable); |
3219 | for (const WriteBarrier<FunctionExecutable>& executable : m_functionExprs) |
3220 | insertFunctionGaps(executable); |
3221 | |
3222 | metadata.m_basicBlockLocation = basicBlockLocation; |
3223 | } |
3224 | } |
3225 | |
3226 | #if ENABLE(JIT) |
3227 | void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map) |
3228 | { |
3229 | ConcurrentJSLocker locker(m_lock); |
3230 | ensureJITData(locker).m_pcToCodeOriginMap = WTFMove(map); |
3231 | } |
3232 | |
3233 | Optional<CodeOrigin> CodeBlock::findPC(void* pc) |
3234 | { |
3235 | { |
3236 | ConcurrentJSLocker locker(m_lock); |
3237 | if (auto* jitData = m_jitData.get()) { |
3238 | if (jitData->m_pcToCodeOriginMap) { |
3239 | if (Optional<CodeOrigin> codeOrigin = jitData->m_pcToCodeOriginMap->findPC(pc)) |
3240 | return codeOrigin; |
3241 | } |
3242 | |
3243 | for (StructureStubInfo* stubInfo : jitData->m_stubInfos) { |
3244 | if (stubInfo->containsPC(pc)) |
3245 | return Optional<CodeOrigin>(stubInfo->codeOrigin); |
3246 | } |
3247 | } |
3248 | } |
3249 | |
3250 | if (Optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc)) |
3251 | return codeOrigin; |
3252 | |
3253 | return WTF::nullopt; |
3254 | } |
3255 | #endif // ENABLE(JIT) |
3256 | |
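// A CallSiteIndex encodes its position differently per tier: LLInt and baseline encode the
// bytecode index directly (or, on 32-bit, a pointer to the instruction), while the
// optimizing tiers map through the CodeOrigin recorded for the call site. Answers nullopt
// when no mapping exists.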
3257 | Optional<BytecodeIndex> CodeBlock::bytecodeIndexFromCallSiteIndex(CallSiteIndex callSiteIndex) |
3258 | { |
3259 | Optional<BytecodeIndex> bytecodeIndex; |
3260 | JITType jitType = this->jitType(); |
3261 | if (jitType == JITType::InterpreterThunk || jitType == JITType::BaselineJIT) { |
3262 | #if USE(JSVALUE64) |
3263 | bytecodeIndex = callSiteIndex.bytecodeIndex(); |
3264 | #else |
3265 | Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits()); |
3266 | bytecodeIndex = this->bytecodeIndex(instruction); |
3267 | #endif |
3268 | } else if (jitType == JITType::DFGJIT || jitType == JITType::FTLJIT) { |
3269 | #if ENABLE(DFG_JIT) |
3270 | RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex)); |
3271 | CodeOrigin origin = codeOrigin(callSiteIndex); |
3272 | bytecodeIndex = origin.bytecodeIndex(); |
3273 | #else |
3274 | RELEASE_ASSERT_NOT_REACHED(); |
3275 | #endif |
3276 | } |
3277 | |
3278 | return bytecodeIndex; |
3279 | } |
3280 | |
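// Scales an execution-count threshold by this code's tier-up history: unknown or mixed
// history keeps the base threshold, code that never optimized waits four times as long,
// and code that optimized before tiers up twice as fast. For example, a base threshold of
// 100 becomes 100, 400, or 50 respectively.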
3281 | int32_t CodeBlock::thresholdForJIT(int32_t threshold) |
3282 | { |
3283 | switch (unlinkedCodeBlock()->didOptimize()) { |
3284 | case MixedTriState: |
3285 | return threshold; |
3286 | case FalseTriState: |
3287 | return threshold * 4; |
3288 | case TrueTriState: |
3289 | return threshold / 2; |
3290 | } |
3291 | ASSERT_NOT_REACHED(); |
3292 | return threshold; |
3293 | } |
3294 | |
3295 | void CodeBlock::jitAfterWarmUp() |
3296 | { |
3297 | m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITAfterWarmUp()), this); |
3298 | } |
3299 | |
3300 | void CodeBlock::jitSoon() |
3301 | { |
3302 | m_llintExecuteCounter.setNewThreshold(thresholdForJIT(Options::thresholdForJITSoon()), this); |
3303 | } |
3304 | |
3305 | bool CodeBlock::hasInstalledVMTrapBreakpoints() const |
3306 | { |
3307 | #if ENABLE(SIGNAL_BASED_VM_TRAPS) |
3308 | // This function may be called from a signal handler. We need to be |
3309 | // careful to not call anything that is not signal handler safe, e.g. |
3310 | // we should not perturb the refCount of m_jitCode. |
3311 | if (!JITCode::isOptimizingJIT(jitType())) |
3312 | return false; |
3313 | return m_jitCode->dfgCommon()->hasInstalledVMTrapsBreakpoints(); |
3314 | #else |
3315 | return false; |
3316 | #endif |
3317 | } |
3318 | |
3319 | bool CodeBlock::installVMTrapBreakpoints() |
3320 | { |
3321 | #if ENABLE(SIGNAL_BASED_VM_TRAPS) |
3322 | // This function may be called from a signal handler. We need to be |
3323 | // careful to not call anything that is not signal handler safe, e.g. |
3324 | // we should not perturb the refCount of m_jitCode. |
3325 | if (!JITCode::isOptimizingJIT(jitType())) |
3326 | return false; |
3327 | auto& commonData = *m_jitCode->dfgCommon(); |
3328 | commonData.installVMTrapBreakpoints(this); |
3329 | return true; |
3330 | #else |
3331 | UNREACHABLE_FOR_PLATFORM(); |
3332 | return false; |
3333 | #endif |
3334 | } |
3335 | |
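// Debug-only accounting (behind ENABLE(MATH_IC_STATS)): walks every CodeBlock in the heap
// and reports the count and average machine-code size of each kind of arithmetic inline
// cache.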
3336 | void CodeBlock::dumpMathICStats() |
3337 | { |
3338 | #if ENABLE(MATH_IC_STATS) |
3339 | double numAdds = 0.0; |
3340 | double totalAddSize = 0.0; |
3341 | double numMuls = 0.0; |
3342 | double totalMulSize = 0.0; |
3343 | double numNegs = 0.0; |
3344 | double totalNegSize = 0.0; |
3345 | double numSubs = 0.0; |
3346 | double totalSubSize = 0.0; |
3347 | |
3348 | auto countICs = [&] (CodeBlock* codeBlock) { |
3349 | if (auto* jitData = codeBlock->m_jitData.get()) { |
3350 | for (JITAddIC* addIC : jitData->m_addICs) { |
3351 | numAdds++; |
3352 | totalAddSize += addIC->codeSize(); |
3353 | } |
3354 | |
3355 | for (JITMulIC* mulIC : jitData->m_mulICs) { |
3356 | numMuls++; |
3357 | totalMulSize += mulIC->codeSize(); |
3358 | } |
3359 | |
3360 | for (JITNegIC* negIC : jitData->m_negICs) { |
3361 | numNegs++; |
3362 | totalNegSize += negIC->codeSize(); |
3363 | } |
3364 | |
3365 | for (JITSubIC* subIC : jitData->m_subICs) { |
3366 | numSubs++; |
3367 | totalSubSize += subIC->codeSize(); |
3368 | } |
3369 | } |
3370 | }; |
3371 | heap()->forEachCodeBlock(countICs); |
3372 | |
3373 | dataLog("Num Adds: " , numAdds, "\n" ); |
3374 | dataLog("Total Add size in bytes: " , totalAddSize, "\n" ); |
3375 | dataLog("Average Add size: " , totalAddSize / numAdds, "\n" ); |
3376 | dataLog("\n" ); |
3377 | dataLog("Num Muls: " , numMuls, "\n" ); |
3378 | dataLog("Total Mul size in bytes: " , totalMulSize, "\n" ); |
3379 | dataLog("Average Mul size: " , totalMulSize / numMuls, "\n" ); |
3380 | dataLog("\n" ); |
3381 | dataLog("Num Negs: " , numNegs, "\n" ); |
3382 | dataLog("Total Neg size in bytes: " , totalNegSize, "\n" ); |
3383 | dataLog("Average Neg size: " , totalNegSize / numNegs, "\n" ); |
3384 | dataLog("\n" ); |
3385 | dataLog("Num Subs: " , numSubs, "\n" ); |
3386 | dataLog("Total Sub size in bytes: " , totalSubSize, "\n" ); |
3387 | dataLog("Average Sub size: " , totalSubSize / numSubs, "\n" ); |
3388 | |
3389 | dataLog("-----------------------\n" ); |
3390 | #endif |
3391 | } |
3392 | |
3393 | void setPrinter(Printer::PrintRecord& record, CodeBlock* codeBlock) |
3394 | { |
3395 | Printer::setPrinter(record, toCString(codeBlock)); |
3396 | } |
3397 | |
3398 | } // namespace JSC |
3399 | |
3400 | namespace WTF { |
3401 | |
3402 | void printInternal(PrintStream& out, JSC::CodeBlock* codeBlock) |
3403 | { |
3404 | if (UNLIKELY(!codeBlock)) { |
3405 | out.print("<null codeBlock>" ); |
3406 | return; |
3407 | } |
3408 | out.print(*codeBlock); |
3409 | } |
3410 | |
3411 | } // namespace WTF |
3412 | |