1 | /* |
2 | * Copyright (C) 2008-2019 Apple Inc. All rights reserved. |
3 | * Copyright (C) 2008 Cameron Zwarich <[email protected]> |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
8 | * |
9 | * 1. Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * 2. Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
14 | * 3. Neither the name of Apple Inc. ("Apple") nor the names of |
15 | * its contributors may be used to endorse or promote products derived |
16 | * from this software without specific prior written permission. |
17 | * |
18 | * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY |
19 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
20 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
21 | * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY |
22 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
23 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
24 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
25 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
26 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
27 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
28 | */ |
29 | |
30 | #pragma once |
31 | |
32 | #include "ArrayProfile.h" |
33 | #include "ByValInfo.h" |
34 | #include "BytecodeConventions.h" |
35 | #include "CallLinkInfo.h" |
36 | #include "CodeBlockHash.h" |
37 | #include "CodeOrigin.h" |
38 | #include "CodeType.h" |
39 | #include "CompilationResult.h" |
40 | #include "ConcurrentJSLock.h" |
41 | #include "DFGCommon.h" |
42 | #include "DirectEvalCodeCache.h" |
43 | #include "EvalExecutable.h" |
44 | #include "ExecutionCounter.h" |
45 | #include "ExpressionRangeInfo.h" |
46 | #include "FunctionExecutable.h" |
47 | #include "HandlerInfo.h" |
48 | #include "ICStatusMap.h" |
49 | #include "Instruction.h" |
50 | #include "InstructionStream.h" |
51 | #include "JITCode.h" |
52 | #include "JITCodeMap.h" |
53 | #include "JITMathICForwards.h" |
54 | #include "JSCast.h" |
55 | #include "JSGlobalObject.h" |
56 | #include "JumpTable.h" |
57 | #include "LLIntCallLinkInfo.h" |
58 | #include "LazyOperandValueProfile.h" |
59 | #include "MetadataTable.h" |
60 | #include "ModuleProgramExecutable.h" |
61 | #include "ObjectAllocationProfile.h" |
62 | #include "Options.h" |
63 | #include "Printer.h" |
64 | #include "ProfilerJettisonReason.h" |
65 | #include "ProgramExecutable.h" |
66 | #include "PutPropertySlot.h" |
67 | #include "ValueProfile.h" |
68 | #include "VirtualRegister.h" |
69 | #include "Watchpoint.h" |
70 | #include <wtf/Bag.h> |
71 | #include <wtf/FastMalloc.h> |
72 | #include <wtf/RefCountedArray.h> |
73 | #include <wtf/RefPtr.h> |
74 | #include <wtf/SegmentedVector.h> |
75 | #include <wtf/Vector.h> |
76 | #include <wtf/text/WTFString.h> |
77 | |
78 | namespace JSC { |
79 | |
80 | #if ENABLE(DFG_JIT) |
81 | namespace DFG { |
82 | struct OSRExitState; |
83 | } // namespace DFG |
84 | #endif |
85 | |
86 | class UnaryArithProfile; |
87 | class BinaryArithProfile; |
88 | class BytecodeLivenessAnalysis; |
89 | class CodeBlockSet; |
90 | class ExecutableToCodeBlockEdge; |
91 | class JSModuleEnvironment; |
class LLIntOffsetsExtractor;
93 | class LLIntPrototypeLoadAdaptiveStructureWatchpoint; |
94 | class MetadataTable; |
95 | class PCToCodeOriginMap; |
96 | class RegisterAtOffsetList; |
97 | class StructureStubInfo; |
98 | |
99 | enum class AccessType : int8_t; |
100 | |
101 | struct OpCatch; |
102 | |
103 | enum ReoptimizationMode { DontCountReoptimization, CountReoptimization }; |
104 | |
105 | class CodeBlock : public JSCell { |
106 | typedef JSCell Base; |
107 | friend class BytecodeLivenessAnalysis; |
108 | friend class JIT; |
109 | friend class LLIntOffsetsExtractor; |
110 | |
111 | public: |
112 | |
113 | enum CopyParsedBlockTag { CopyParsedBlock }; |
114 | |
115 | static constexpr unsigned StructureFlags = Base::StructureFlags | StructureIsImmortal; |
116 | static constexpr bool needsDestruction = true; |
117 | |
118 | template<typename, SubspaceAccess> |
119 | static IsoSubspace* subspaceFor(VM&) { return nullptr; } |
120 | // GC strongly assumes CodeBlock is not a PreciseAllocation for now. |
121 | static constexpr uint8_t numberOfLowerTierCells = 0; |
122 | |
123 | DECLARE_INFO; |
124 | |
125 | protected: |
126 | CodeBlock(VM&, Structure*, CopyParsedBlockTag, CodeBlock& other); |
127 | CodeBlock(VM&, Structure*, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*); |
128 | |
129 | void finishCreation(VM&, CopyParsedBlockTag, CodeBlock& other); |
130 | bool finishCreation(VM&, ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*); |
131 | |
132 | void finishCreationCommon(VM&); |
133 | |
134 | WriteBarrier<JSGlobalObject> m_globalObject; |
135 | |
136 | public: |
137 | JS_EXPORT_PRIVATE ~CodeBlock(); |
138 | |
139 | UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); } |
140 | |
141 | CString inferredName() const; |
142 | CodeBlockHash hash() const; |
143 | bool hasHash() const; |
144 | bool isSafeToComputeHash() const; |
145 | CString hashAsStringIfPossible() const; |
146 | CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature. |
147 | CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space. |
148 | void dumpAssumingJITType(PrintStream&, JITType) const; |
149 | JS_EXPORT_PRIVATE void dump(PrintStream&) const; |
150 | |
151 | MetadataTable* metadataTable() const { return m_metadata.get(); } |
152 | |
153 | int numParameters() const { return m_numParameters; } |
154 | void setNumParameters(int newValue); |
155 | |
156 | int numberOfArgumentsToSkip() const { return m_numberOfArgumentsToSkip; } |
157 | |
158 | int numCalleeLocals() const { return m_numCalleeLocals; } |
159 | |
160 | int numVars() const { return m_numVars; } |
161 | |
162 | int* addressOfNumParameters() { return &m_numParameters; } |
163 | static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); } |
164 | |
165 | CodeBlock* alternative() const { return static_cast<CodeBlock*>(m_alternative.get()); } |
166 | void setAlternative(VM&, CodeBlock*); |
167 | |
168 | template <typename Functor> void forEachRelatedCodeBlock(Functor&& functor) |
169 | { |
170 | Functor f(std::forward<Functor>(functor)); |
171 | Vector<CodeBlock*, 4> codeBlocks; |
172 | codeBlocks.append(this); |
173 | |
174 | while (!codeBlocks.isEmpty()) { |
175 | CodeBlock* currentCodeBlock = codeBlocks.takeLast(); |
176 | f(currentCodeBlock); |
177 | |
178 | if (CodeBlock* alternative = currentCodeBlock->alternative()) |
179 | codeBlocks.append(alternative); |
180 | if (CodeBlock* osrEntryBlock = currentCodeBlock->specialOSREntryBlockOrNull()) |
181 | codeBlocks.append(osrEntryBlock); |
182 | } |
183 | } |
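
    // Illustrative sketch of walking the tier hierarchy with the helper above;
    // the lambda body is hypothetical:
    //
    //     codeBlock->forEachRelatedCodeBlock([] (CodeBlock* related) {
    //         dataLog(related, "\n"); // e.g. dump each related block
    //     });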
184 | |
185 | CodeSpecializationKind specializationKind() const |
186 | { |
187 | return specializationFromIsConstruct(isConstructor()); |
188 | } |
189 | |
190 | CodeBlock* alternativeForJettison(); |
191 | JS_EXPORT_PRIVATE CodeBlock* baselineAlternative(); |
192 | |
193 | // FIXME: Get rid of this. |
194 | // https://bugs.webkit.org/show_bug.cgi?id=123677 |
195 | CodeBlock* baselineVersion(); |
196 | |
197 | static size_t estimatedSize(JSCell*, VM&); |
198 | static void visitChildren(JSCell*, SlotVisitor&); |
199 | static void destroy(JSCell*); |
200 | void visitChildren(SlotVisitor&); |
201 | void finalizeUnconditionally(VM&); |
202 | |
203 | void notifyLexicalBindingUpdate(); |
204 | |
205 | void dumpSource(); |
206 | void dumpSource(PrintStream&); |
207 | |
208 | void dumpBytecode(); |
209 | void dumpBytecode(PrintStream&); |
210 | void dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& = ICStatusMap()); |
211 | void dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& = ICStatusMap()); |
212 | |
213 | void dumpExceptionHandlers(PrintStream&); |
214 | void printStructures(PrintStream&, const Instruction*); |
215 | void printStructure(PrintStream&, const char* name, const Instruction*, int operand); |
216 | |
217 | void dumpMathICStats(); |
218 | |
219 | bool isStrictMode() const { return m_unlinkedCode->isStrictMode(); } |
220 | bool isConstructor() const { return m_unlinkedCode->isConstructor(); } |
221 | ECMAMode ecmaMode() const { return isStrictMode() ? StrictMode : NotStrictMode; } |
222 | CodeType codeType() const { return m_unlinkedCode->codeType(); } |
223 | |
224 | JSParserScriptMode scriptMode() const { return m_unlinkedCode->scriptMode(); } |
225 | |
226 | bool hasInstalledVMTrapBreakpoints() const; |
227 | bool installVMTrapBreakpoints(); |
228 | |
229 | inline bool isKnownNotImmediate(int index) |
230 | { |
231 | if (index == thisRegister().offset() && !isStrictMode()) |
232 | return true; |
233 | |
234 | if (isConstantRegisterIndex(index)) |
235 | return getConstant(index).isCell(); |
236 | |
237 | return false; |
238 | } |
239 | |
240 | ALWAYS_INLINE bool isTemporaryRegisterIndex(int index) |
241 | { |
242 | return index >= m_numVars; |
243 | } |
244 | |
245 | HandlerInfo* handlerForBytecodeIndex(BytecodeIndex, RequiredHandler = RequiredHandler::AnyHandler); |
246 | HandlerInfo* handlerForIndex(unsigned, RequiredHandler = RequiredHandler::AnyHandler); |
247 | void removeExceptionHandlerForCallSite(DisposableCallSiteIndex); |
248 | unsigned lineNumberForBytecodeIndex(BytecodeIndex); |
249 | unsigned columnNumberForBytecodeIndex(BytecodeIndex); |
250 | void expressionRangeForBytecodeIndex(BytecodeIndex, int& divot, |
251 | int& startOffset, int& endOffset, unsigned& line, unsigned& column) const; |
252 | |
253 | Optional<BytecodeIndex> bytecodeIndexFromCallSiteIndex(CallSiteIndex); |
254 | |
255 | void getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result); |
256 | void getICStatusMap(ICStatusMap& result); |
257 | |
258 | #if ENABLE(JIT) |
259 | struct JITData { |
260 | WTF_MAKE_STRUCT_FAST_ALLOCATED; |
261 | |
262 | Bag<StructureStubInfo> m_stubInfos; |
263 | Bag<JITAddIC> m_addICs; |
264 | Bag<JITMulIC> m_mulICs; |
265 | Bag<JITNegIC> m_negICs; |
266 | Bag<JITSubIC> m_subICs; |
267 | Bag<ByValInfo> m_byValInfos; |
268 | Bag<CallLinkInfo> m_callLinkInfos; |
269 | SentinelLinkedList<CallLinkInfo, PackedRawSentinelNode<CallLinkInfo>> m_incomingCalls; |
270 | SentinelLinkedList<PolymorphicCallNode, PackedRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls; |
271 | SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles; |
272 | std::unique_ptr<PCToCodeOriginMap> m_pcToCodeOriginMap; |
273 | std::unique_ptr<RegisterAtOffsetList> m_calleeSaveRegisters; |
274 | JITCodeMap m_jitCodeMap; |
275 | }; |
276 | |
277 | JITData& ensureJITData(const ConcurrentJSLocker& locker) |
278 | { |
279 | if (LIKELY(m_jitData)) |
280 | return *m_jitData; |
281 | return ensureJITDataSlow(locker); |
282 | } |
283 | JITData& ensureJITDataSlow(const ConcurrentJSLocker&); |
284 | |
285 | JITAddIC* addJITAddIC(BinaryArithProfile*); |
286 | JITMulIC* addJITMulIC(BinaryArithProfile*); |
287 | JITNegIC* addJITNegIC(UnaryArithProfile*); |
288 | JITSubIC* addJITSubIC(BinaryArithProfile*); |
289 | |
290 | template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITAddGenerator>::value>::type> |
291 | JITAddIC* addMathIC(BinaryArithProfile* profile) { return addJITAddIC(profile); } |
292 | |
293 | template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITMulGenerator>::value>::type> |
294 | JITMulIC* addMathIC(BinaryArithProfile* profile) { return addJITMulIC(profile); } |
295 | |
296 | template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITNegGenerator>::value>::type> |
297 | JITNegIC* addMathIC(UnaryArithProfile* profile) { return addJITNegIC(profile); } |
298 | |
299 | template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITSubGenerator>::value>::type> |
300 | JITSubIC* addMathIC(BinaryArithProfile* profile) { return addJITSubIC(profile); } |
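
    // Example (illustrative sketch): a baseline JIT compiling an arithmetic op
    // would request the matching IC via its generator type; `profile` is an
    // assumed BinaryArithProfile for that bytecode:
    //
    //     JITAddIC* addIC = codeBlock->addMathIC<JITAddGenerator>(profile);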
301 | |
302 | StructureStubInfo* addStubInfo(AccessType); |
303 | |
    // O(n) operation. Use getICStatusMap() unless you really only intend to get one
    // stub info.
306 | StructureStubInfo* findStubInfo(CodeOrigin); |
307 | |
308 | ByValInfo* addByValInfo(); |
309 | |
310 | CallLinkInfo* addCallLinkInfo(); |
311 | |
312 | // This is a slow function call used primarily for compiling OSR exits in the case |
313 | // that there had been inlining. Chances are if you want to use this, you're really |
314 | // looking for a CallLinkInfoMap to amortize the cost of calling this. |
315 | CallLinkInfo* getCallLinkInfoForBytecodeIndex(BytecodeIndex); |
316 | |
317 | void setJITCodeMap(JITCodeMap&& jitCodeMap) |
318 | { |
319 | ConcurrentJSLocker locker(m_lock); |
320 | ensureJITData(locker).m_jitCodeMap = WTFMove(jitCodeMap); |
321 | } |
322 | const JITCodeMap& jitCodeMap() |
323 | { |
324 | ConcurrentJSLocker locker(m_lock); |
325 | return ensureJITData(locker).m_jitCodeMap; |
326 | } |
327 | |
328 | void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&); |
329 | Optional<CodeOrigin> findPC(void* pc); |
330 | |
331 | void setCalleeSaveRegisters(RegisterSet); |
332 | void setCalleeSaveRegisters(std::unique_ptr<RegisterAtOffsetList>); |
333 | |
334 | RareCaseProfile* addRareCaseProfile(BytecodeIndex); |
335 | RareCaseProfile* rareCaseProfileForBytecodeIndex(const ConcurrentJSLocker&, BytecodeIndex); |
336 | unsigned rareCaseProfileCountForBytecodeIndex(const ConcurrentJSLocker&, BytecodeIndex); |
337 | |
338 | bool likelyToTakeSlowCase(BytecodeIndex bytecodeIndex) |
339 | { |
340 | if (!hasBaselineJITProfiling()) |
341 | return false; |
342 | ConcurrentJSLocker locker(m_lock); |
343 | unsigned value = rareCaseProfileCountForBytecodeIndex(locker, bytecodeIndex); |
344 | return value >= Options::likelyToTakeSlowCaseMinimumCount(); |
345 | } |
346 | |
347 | bool couldTakeSlowCase(BytecodeIndex bytecodeIndex) |
348 | { |
349 | if (!hasBaselineJITProfiling()) |
350 | return false; |
351 | ConcurrentJSLocker locker(m_lock); |
352 | unsigned value = rareCaseProfileCountForBytecodeIndex(locker, bytecodeIndex); |
353 | return value >= Options::couldTakeSlowCaseMinimumCount(); |
354 | } |
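
    // Sketch of how a tiering decision might consult these counters; the
    // helper called in the body is hypothetical:
    //
    //     if (codeBlock->couldTakeSlowCase(bytecodeIndex))
    //         planForSlowPath(bytecodeIndex); // hypothetical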
355 | |
356 | // We call this when we want to reattempt compiling something with the baseline JIT. Ideally |
357 | // the baseline JIT would not add data to CodeBlock, but instead it would put its data into |
358 | // a newly created JITCode, which could be thrown away if we bail on JIT compilation. Then we |
359 | // would be able to get rid of this silly function. |
360 | // FIXME: https://bugs.webkit.org/show_bug.cgi?id=159061 |
361 | void resetJITData(); |
362 | #endif // ENABLE(JIT) |
363 | |
364 | void unlinkIncomingCalls(); |
365 | |
366 | #if ENABLE(JIT) |
367 | void linkIncomingCall(CallFrame* callerFrame, CallLinkInfo*); |
368 | void linkIncomingPolymorphicCall(CallFrame* callerFrame, PolymorphicCallNode*); |
369 | #endif // ENABLE(JIT) |
370 | |
371 | void linkIncomingCall(CallFrame* callerFrame, LLIntCallLinkInfo*); |
372 | |
373 | const Instruction* outOfLineJumpTarget(const Instruction* pc); |
374 | int outOfLineJumpOffset(const Instruction* pc); |
375 | int outOfLineJumpOffset(const InstructionStream::Ref& instruction) |
376 | { |
377 | return outOfLineJumpOffset(instruction.ptr()); |
378 | } |
379 | |
380 | inline unsigned bytecodeOffset(const Instruction* returnAddress) |
381 | { |
382 | const auto* instructionsBegin = instructions().at(0).ptr(); |
383 | const auto* instructionsEnd = reinterpret_cast<const Instruction*>(reinterpret_cast<uintptr_t>(instructionsBegin) + instructions().size()); |
384 | RELEASE_ASSERT(returnAddress >= instructionsBegin && returnAddress < instructionsEnd); |
385 | return returnAddress - instructionsBegin; |
386 | } |
387 | |
388 | inline BytecodeIndex bytecodeIndex(const Instruction* returnAddress) |
389 | { |
390 | return BytecodeIndex(bytecodeOffset(returnAddress)); |
391 | } |
392 | |
393 | const InstructionStream& instructions() const { return m_unlinkedCode->instructions(); } |
394 | |
395 | size_t predictedMachineCodeSize(); |
396 | |
397 | unsigned instructionsSize() const { return instructions().size(); } |
398 | unsigned bytecodeCost() const { return m_bytecodeCost; } |
399 | |
400 | // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind()) |
401 | CodeBlock* newReplacement(); |
402 | |
403 | void setJITCode(Ref<JITCode>&& code) |
404 | { |
405 | ASSERT(heap()->isDeferred()); |
406 | if (!code->isShared()) |
407 | heap()->reportExtraMemoryAllocated(code->size()); |
408 | |
409 | ConcurrentJSLocker locker(m_lock); |
410 | WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid. |
411 | m_jitCode = WTFMove(code); |
412 | } |
413 | |
414 | RefPtr<JITCode> jitCode() { return m_jitCode; } |
415 | static ptrdiff_t jitCodeOffset() { return OBJECT_OFFSETOF(CodeBlock, m_jitCode); } |
416 | JITType jitType() const |
417 | { |
418 | JITCode* jitCode = m_jitCode.get(); |
419 | WTF::loadLoadFence(); |
420 | JITType result = JITCode::jitTypeFor(jitCode); |
421 | WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good. |
422 | return result; |
423 | } |
424 | |
425 | bool hasBaselineJITProfiling() const |
426 | { |
427 | return jitType() == JITType::BaselineJIT; |
428 | } |
429 | |
430 | #if ENABLE(JIT) |
431 | CodeBlock* replacement(); |
432 | |
433 | DFG::CapabilityLevel computeCapabilityLevel(); |
434 | DFG::CapabilityLevel capabilityLevel(); |
435 | DFG::CapabilityLevel capabilityLevelState() { return static_cast<DFG::CapabilityLevel>(m_capabilityLevelState); } |
436 | |
437 | bool hasOptimizedReplacement(JITType typeToReplace); |
    bool hasOptimizedReplacement(); // typeToReplace is this CodeBlock's own JITType
439 | #endif |
440 | |
441 | void jettison(Profiler::JettisonReason, ReoptimizationMode = DontCountReoptimization, const FireDetail* = nullptr); |
442 | |
443 | ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); } |
444 | |
445 | ExecutableToCodeBlockEdge* ownerEdge() const { return m_ownerEdge.get(); } |
446 | |
447 | VM& vm() const { return *m_vm; } |
448 | |
449 | VirtualRegister thisRegister() const { return m_unlinkedCode->thisRegister(); } |
450 | |
451 | bool usesEval() const { return m_unlinkedCode->usesEval(); } |
452 | |
453 | void setScopeRegister(VirtualRegister scopeRegister) |
454 | { |
455 | ASSERT(scopeRegister.isLocal() || !scopeRegister.isValid()); |
456 | m_scopeRegister = scopeRegister; |
457 | } |
458 | |
459 | VirtualRegister scopeRegister() const |
460 | { |
461 | return m_scopeRegister; |
462 | } |
463 | |
464 | PutPropertySlot::Context putByIdContext() const |
465 | { |
466 | if (codeType() == EvalCode) |
467 | return PutPropertySlot::PutByIdEval; |
468 | return PutPropertySlot::PutById; |
469 | } |
470 | |
471 | const SourceCode& source() const { return m_ownerExecutable->source(); } |
472 | unsigned sourceOffset() const { return m_ownerExecutable->source().startOffset(); } |
473 | unsigned firstLineColumnOffset() const { return m_ownerExecutable->startColumn(); } |
474 | |
475 | size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); } |
476 | unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); } |
477 | |
478 | String nameForRegister(VirtualRegister); |
479 | |
480 | unsigned numberOfArgumentValueProfiles() |
481 | { |
482 | ASSERT(m_numParameters >= 0); |
483 | ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters) || !vm().canUseJIT()); |
484 | return m_argumentValueProfiles.size(); |
485 | } |
486 | |
487 | ValueProfile& valueProfileForArgument(unsigned argumentIndex) |
488 | { |
489 | ASSERT(vm().canUseJIT()); // This is only called from the various JIT compilers or places that first check numberOfArgumentValueProfiles before calling this. |
490 | ValueProfile& result = m_argumentValueProfiles[argumentIndex]; |
491 | return result; |
492 | } |
493 | |
494 | ValueProfile& valueProfileForBytecodeIndex(BytecodeIndex); |
495 | SpeculatedType valueProfilePredictionForBytecodeIndex(const ConcurrentJSLocker&, BytecodeIndex); |
496 | |
497 | template<typename Functor> void forEachValueProfile(const Functor&); |
498 | template<typename Functor> void forEachArrayProfile(const Functor&); |
499 | template<typename Functor> void forEachArrayAllocationProfile(const Functor&); |
500 | template<typename Functor> void forEachObjectAllocationProfile(const Functor&); |
501 | template<typename Functor> void forEachLLIntCallLinkInfo(const Functor&); |
502 | |
503 | BinaryArithProfile* binaryArithProfileForBytecodeIndex(BytecodeIndex); |
504 | UnaryArithProfile* unaryArithProfileForBytecodeIndex(BytecodeIndex); |
505 | BinaryArithProfile* binaryArithProfileForPC(const Instruction*); |
506 | UnaryArithProfile* unaryArithProfileForPC(const Instruction*); |
507 | |
508 | bool couldTakeSpecialArithFastCase(BytecodeIndex bytecodeOffset); |
509 | |
510 | ArrayProfile* getArrayProfile(const ConcurrentJSLocker&, BytecodeIndex); |
511 | ArrayProfile* getArrayProfile(BytecodeIndex); |
512 | |
513 | // Exception handling support |
514 | |
515 | size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; } |
516 | HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; } |
517 | |
518 | bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); } |
519 | |
520 | #if ENABLE(DFG_JIT) |
521 | Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins(); |
522 | |
523 | // Having code origins implies that there has been some inlining. |
524 | bool hasCodeOrigins() |
525 | { |
526 | return JITCode::isOptimizingJIT(jitType()); |
527 | } |
528 | |
529 | bool canGetCodeOrigin(CallSiteIndex index) |
530 | { |
531 | if (!hasCodeOrigins()) |
532 | return false; |
533 | return index.bits() < codeOrigins().size(); |
534 | } |
535 | |
536 | CodeOrigin codeOrigin(CallSiteIndex index) |
537 | { |
538 | return codeOrigins()[index.bits()]; |
539 | } |
540 | |
541 | CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles(const ConcurrentJSLocker&) |
542 | { |
543 | return m_lazyOperandValueProfiles; |
544 | } |
545 | #endif // ENABLE(DFG_JIT) |
546 | |
547 | // Constant Pool |
548 | #if ENABLE(DFG_JIT) |
549 | size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); } |
550 | size_t numberOfDFGIdentifiers() const; |
551 | const Identifier& identifier(int index) const; |
552 | #else |
553 | size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); } |
554 | const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); } |
555 | #endif |
556 | |
557 | Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; } |
558 | Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation() { return m_constantsSourceCodeRepresentation; } |
559 | unsigned addConstant(const ConcurrentJSLocker&, JSValue v) |
560 | { |
561 | unsigned result = m_constantRegisters.size(); |
562 | m_constantRegisters.append(WriteBarrier<Unknown>()); |
563 | m_constantRegisters.last().set(*m_vm, this, v); |
564 | m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other); |
565 | return result; |
566 | } |
567 | |
568 | unsigned addConstantLazily(const ConcurrentJSLocker&) |
569 | { |
570 | unsigned result = m_constantRegisters.size(); |
571 | m_constantRegisters.append(WriteBarrier<Unknown>()); |
572 | m_constantsSourceCodeRepresentation.append(SourceCodeRepresentation::Other); |
573 | return result; |
574 | } |
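
    // Sketch of the intended pairing for addConstantLazily(): the slot is
    // appended empty under the lock and filled in afterwards. `locker` and
    // `value` are assumed to be supplied by the caller:
    //
    //     unsigned index = addConstantLazily(locker);
    //     constants()[index].set(vm(), this, value);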
575 | |
576 | const Vector<WriteBarrier<Unknown>>& constantRegisters() { return m_constantRegisters; } |
577 | WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; } |
578 | static ALWAYS_INLINE bool isConstantRegisterIndex(int index) { return index >= FirstConstantRegisterIndex; } |
579 | ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); } |
580 | ALWAYS_INLINE SourceCodeRepresentation constantSourceCodeRepresentation(int index) const { return m_constantsSourceCodeRepresentation[index - FirstConstantRegisterIndex]; } |
581 | |
582 | FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); } |
583 | int numberOfFunctionDecls() { return m_functionDecls.size(); } |
584 | FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); } |
585 | |
586 | const BitVector& bitVector(size_t i) { return m_unlinkedCode->bitVector(i); } |
587 | |
588 | Heap* heap() const { return &m_vm->heap; } |
589 | JSGlobalObject* globalObject() { return m_globalObject.get(); } |
590 | |
591 | JSGlobalObject* globalObjectFor(CodeOrigin); |
592 | |
593 | BytecodeLivenessAnalysis& livenessAnalysis() |
594 | { |
595 | return m_unlinkedCode->livenessAnalysis(this); |
596 | } |
597 | |
598 | void validate(); |
599 | |
600 | // Jump Tables |
601 | |
602 | size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; } |
603 | SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); } |
604 | SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; } |
605 | void clearSwitchJumpTables() |
606 | { |
607 | if (!m_rareData) |
608 | return; |
609 | m_rareData->m_switchJumpTables.clear(); |
610 | } |
611 | #if ENABLE(DFG_JIT) |
612 | void addSwitchJumpTableFromProfiledCodeBlock(SimpleJumpTable& profiled) |
613 | { |
614 | createRareDataIfNecessary(); |
615 | m_rareData->m_switchJumpTables.append(profiled.cloneNonJITPart()); |
616 | } |
617 | #endif |
618 | |
619 | size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; } |
620 | StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); } |
621 | StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; } |
622 | |
623 | DirectEvalCodeCache& directEvalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_directEvalCodeCache; } |
624 | |
625 | enum ShrinkMode { |
626 | // Shrink prior to generating machine code that may point directly into vectors. |
627 | EarlyShrink, |
628 | |
629 | // Shrink after generating machine code, and after possibly creating new vectors |
630 | // and appending to others. At this time it is not safe to shrink certain vectors |
631 | // because we would have generated machine code that references them directly. |
632 | LateShrink |
633 | }; |
634 | void shrinkToFit(ShrinkMode); |
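
    // Minimal usage sketch (assumed, based on the ShrinkMode comments above):
    // a finalizer that has just emitted machine code would use LateShrink,
    // since EarlyShrink is only safe before code points into these vectors:
    //
    //     codeBlock->shrinkToFit(CodeBlock::LateShrink);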
635 | |
636 | // Functions for controlling when JITting kicks in, in a mixed mode |
637 | // execution world. |
638 | |
639 | bool checkIfJITThresholdReached() |
640 | { |
641 | return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this); |
642 | } |
643 | |
644 | void dontJITAnytimeSoon() |
645 | { |
646 | m_llintExecuteCounter.deferIndefinitely(); |
647 | } |
648 | |
649 | int32_t thresholdForJIT(int32_t threshold); |
650 | void jitAfterWarmUp(); |
651 | void jitSoon(); |
652 | |
653 | const BaselineExecutionCounter& llintExecuteCounter() const |
654 | { |
655 | return m_llintExecuteCounter; |
656 | } |
657 | |
658 | typedef HashMap<std::tuple<StructureID, unsigned>, Vector<LLIntPrototypeLoadAdaptiveStructureWatchpoint>> StructureWatchpointMap; |
659 | StructureWatchpointMap& llintGetByIdWatchpointMap() { return m_llintGetByIdWatchpointMap; } |
660 | |
661 | // Functions for controlling when tiered compilation kicks in. This |
662 | // controls both when the optimizing compiler is invoked and when OSR |
663 | // entry happens. Two triggers exist: the loop trigger and the return |
664 | // trigger. In either case, when an addition to m_jitExecuteCounter |
665 | // causes it to become non-negative, the optimizing compiler is |
666 | // invoked. This includes a fast check to see if this CodeBlock has |
667 | // already been optimized (i.e. replacement() returns a CodeBlock |
668 | // that was optimized with a higher tier JIT than this one). In the |
669 | // case of the loop trigger, if the optimized compilation succeeds |
670 | // (or has already succeeded in the past) then OSR is attempted to |
671 | // redirect program flow into the optimized code. |
672 | |
673 | // These functions are called from within the optimization triggers, |
674 | // and are used as a single point at which we define the heuristics |
675 | // for how much warm-up is mandated before the next optimization |
    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
677 | // as this is called from the CodeBlock constructor. |
678 | |
679 | // When we observe a lot of speculation failures, we trigger a |
680 | // reoptimization. But each time, we increase the optimization trigger |
681 | // to avoid thrashing. |
682 | JS_EXPORT_PRIVATE unsigned reoptimizationRetryCounter() const; |
683 | void countReoptimization(); |
684 | |
685 | #if !ENABLE(C_LOOP) |
686 | const RegisterAtOffsetList* calleeSaveRegisters() const; |
687 | |
688 | static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return RegisterSet::llintBaselineCalleeSaveRegisters().numberOfSetRegisters(); } |
689 | static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters(); |
690 | size_t calleeSaveSpaceAsVirtualRegisters(); |
691 | #else |
692 | static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return 0; } |
    static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters() { return 1; }
694 | size_t calleeSaveSpaceAsVirtualRegisters() { return 0; } |
695 | #endif |
696 | |
697 | #if ENABLE(JIT) |
698 | unsigned numberOfDFGCompiles(); |
699 | |
700 | int32_t codeTypeThresholdMultiplier() const; |
701 | |
702 | int32_t adjustedCounterValue(int32_t desiredThreshold); |
703 | |
704 | int32_t* addressOfJITExecuteCounter() |
705 | { |
706 | return &m_jitExecuteCounter.m_counter; |
707 | } |
708 | |
709 | static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_counter); } |
710 | static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_activeThreshold); } |
711 | static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(BaselineExecutionCounter, m_totalCount); } |
712 | |
713 | const BaselineExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; } |
714 | |
715 | unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; } |
716 | |
717 | // Check if the optimization threshold has been reached, and if not, |
718 | // adjust the heuristics accordingly. Returns true if the threshold has |
719 | // been reached. |
720 | bool checkIfOptimizationThresholdReached(); |
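
    // Rough sketch of the trigger flow; the compile helper below is
    // hypothetical, only the CodeBlock methods are taken from this header:
    //
    //     if (codeBlock->checkIfOptimizationThresholdReached()) {
    //         CompilationResult result = tryCompileWithHigherTier(codeBlock); // hypothetical
    //         codeBlock->setOptimizationThresholdBasedOnCompilationResult(result);
    //     }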
721 | |
722 | // Call this to force the next optimization trigger to fire. This is |
723 | // rarely wise, since optimization triggers are typically more |
724 | // expensive than executing baseline code. |
725 | void optimizeNextInvocation(); |
726 | |
727 | // Call this to prevent optimization from happening again. Note that |
728 | // optimization will still happen after roughly 2^29 invocations, |
729 | // so this is really meant to delay that as much as possible. This |
730 | // is called if optimization failed, and we expect it to fail in |
731 | // the future as well. |
732 | void dontOptimizeAnytimeSoon(); |
733 | |
734 | // Call this to reinitialize the counter to its starting state, |
735 | // forcing a warm-up to happen before the next optimization trigger |
736 | // fires. This is called in the CodeBlock constructor. It also |
737 | // makes sense to call this if an OSR exit occurred. Note that |
    // OSR exit code is generated code, so the value of the execute
739 | // counter that this corresponds to is also available directly. |
740 | void optimizeAfterWarmUp(); |
741 | |
742 | // Call this to force an optimization trigger to fire only after |
743 | // a lot of warm-up. |
744 | void optimizeAfterLongWarmUp(); |
745 | |
746 | // Call this to cause an optimization trigger to fire soon, but |
747 | // not necessarily the next one. This makes sense if optimization |
748 | // succeeds. Successful optimization means that all calls are |
749 | // relinked to the optimized code, so this only affects call |
750 | // frames that are still executing this CodeBlock. The value here |
751 | // is tuned to strike a balance between the cost of OSR entry |
752 | // (which is too high to warrant making every loop back edge to |
753 | // trigger OSR immediately) and the cost of executing baseline |
754 | // code (which is high enough that we don't necessarily want to |
755 | // have a full warm-up). The intuition for calling this instead of |
756 | // optimizeNextInvocation() is for the case of recursive functions |
757 | // with loops. Consider that there may be N call frames of some |
758 | // recursive function, for a reasonably large value of N. The top |
759 | // one triggers optimization, and then returns, and then all of |
760 | // the others return. We don't want optimization to be triggered on |
761 | // each return, as that would be superfluous. It only makes sense |
762 | // to trigger optimization if one of those functions becomes hot |
763 | // in the baseline code. |
764 | void optimizeSoon(); |
765 | |
766 | void forceOptimizationSlowPathConcurrently(); |
767 | |
768 | void setOptimizationThresholdBasedOnCompilationResult(CompilationResult); |
769 | |
770 | uint32_t osrExitCounter() const { return m_osrExitCounter; } |
771 | |
772 | void countOSRExit() { m_osrExitCounter++; } |
773 | |
774 | enum class OptimizeAction { None, ReoptimizeNow }; |
775 | #if ENABLE(DFG_JIT) |
776 | OptimizeAction updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState&); |
777 | #endif |
778 | |
779 | static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); } |
780 | |
781 | uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold); |
782 | uint32_t exitCountThresholdForReoptimization(); |
783 | uint32_t exitCountThresholdForReoptimizationFromLoop(); |
784 | bool shouldReoptimizeNow(); |
785 | bool shouldReoptimizeFromLoopNow(); |
786 | |
787 | #else // No JIT |
788 | void optimizeAfterWarmUp() { } |
789 | unsigned numberOfDFGCompiles() { return 0; } |
790 | #endif |
791 | |
792 | bool shouldOptimizeNow(); |
793 | void updateAllValueProfilePredictions(); |
794 | void updateAllArrayPredictions(); |
795 | void updateAllPredictions(); |
796 | |
797 | unsigned frameRegisterCount(); |
798 | int stackPointerOffset(); |
799 | |
800 | bool hasOpDebugForLineAndColumn(unsigned line, Optional<unsigned> column); |
801 | |
802 | bool hasDebuggerRequests() const { return m_debuggerRequests; } |
803 | void* debuggerRequestsAddress() { return &m_debuggerRequests; } |
804 | |
805 | void addBreakpoint(unsigned numBreakpoints); |
806 | void removeBreakpoint(unsigned numBreakpoints) |
807 | { |
808 | ASSERT(m_numBreakpoints >= numBreakpoints); |
809 | m_numBreakpoints -= numBreakpoints; |
810 | } |
811 | |
812 | enum SteppingMode { |
813 | SteppingModeDisabled, |
814 | SteppingModeEnabled |
815 | }; |
816 | void setSteppingMode(SteppingMode); |
817 | |
818 | void clearDebuggerRequests() |
819 | { |
820 | m_steppingMode = SteppingModeDisabled; |
821 | m_numBreakpoints = 0; |
822 | } |
823 | |
824 | bool wasCompiledWithDebuggingOpcodes() const { return m_unlinkedCode->wasCompiledWithDebuggingOpcodes(); } |
825 | |
826 | // This is intentionally public; it's the responsibility of anyone doing any |
827 | // of the following to hold the lock: |
828 | // |
829 | // - Modifying any inline cache in this code block. |
830 | // |
    // - Querying any inline cache in this code block, from a thread other than
832 | // the main thread. |
833 | // |
834 | // Additionally, it's only legal to modify the inline cache on the main |
835 | // thread. This means that the main thread can query the inline cache without |
836 | // locking. This is crucial since executing the inline cache is effectively |
837 | // "querying" it. |
838 | // |
839 | // Another exception to the rules is that the GC can do whatever it wants |
840 | // without holding any locks, because the GC is guaranteed to wait until any |
841 | // concurrent compilation threads finish what they're doing. |
842 | mutable ConcurrentJSLock m_lock; |
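
    // For example (illustrative sketch), a concurrent compiler thread querying
    // this block's inline caches would hold the lock like so:
    //
    //     ConcurrentJSLocker locker(codeBlock->m_lock);
    //     ICStatusMap statuses;
    //     codeBlock->getICStatusMap(locker, statuses);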
843 | |
844 | bool m_shouldAlwaysBeInlined; // Not a bitfield because the JIT wants to store to it. |
845 | |
846 | #if ENABLE(JIT) |
847 | unsigned m_capabilityLevelState : 2; // DFG::CapabilityLevel |
848 | #endif |
849 | |
850 | bool m_allTransitionsHaveBeenMarked : 1; // Initialized and used on every GC. |
851 | |
852 | bool m_didFailJITCompilation : 1; |
853 | bool m_didFailFTLCompilation : 1; |
854 | bool m_hasBeenCompiledWithFTL : 1; |
855 | |
    // Internal methods for use by validation code. They would be private if it weren't
    // for the fact that we use them from anonymous namespaces.
858 | void beginValidationDidFail(); |
859 | NO_RETURN_DUE_TO_CRASH void endValidationDidFail(); |
860 | |
861 | struct RareData { |
862 | WTF_MAKE_FAST_ALLOCATED; |
863 | public: |
864 | Vector<HandlerInfo> m_exceptionHandlers; |
865 | |
866 | // Jump Tables |
867 | Vector<SimpleJumpTable> m_switchJumpTables; |
868 | Vector<StringJumpTable> m_stringSwitchJumpTables; |
869 | |
870 | Vector<std::unique_ptr<ValueProfileAndOperandBuffer>> m_catchProfiles; |
871 | |
872 | DirectEvalCodeCache m_directEvalCodeCache; |
873 | }; |
874 | |
875 | void clearExceptionHandlers() |
876 | { |
877 | if (m_rareData) |
878 | m_rareData->m_exceptionHandlers.clear(); |
879 | } |
880 | |
881 | void appendExceptionHandler(const HandlerInfo& handler) |
882 | { |
883 | createRareDataIfNecessary(); // We may be handling the exception of an inlined call frame. |
884 | m_rareData->m_exceptionHandlers.append(handler); |
885 | } |
886 | |
887 | DisposableCallSiteIndex newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite); |
888 | |
889 | void ensureCatchLivenessIsComputedForBytecodeIndex(BytecodeIndex); |
890 | |
891 | bool hasTailCalls() const { return m_unlinkedCode->hasTailCalls(); } |
892 | |
893 | template<typename Metadata> |
894 | Metadata& metadata(OpcodeID opcodeID, unsigned metadataID) |
895 | { |
896 | ASSERT(m_metadata); |
897 | return bitwise_cast<Metadata*>(m_metadata->get(opcodeID))[metadataID]; |
898 | } |
899 | |
900 | size_t metadataSizeInBytes() |
901 | { |
902 | return m_unlinkedCode->metadataSizeInBytes(); |
903 | } |
904 | |
905 | MetadataTable* metadataTable() { return m_metadata.get(); } |
906 | const void* instructionsRawPointer() { return m_instructionsRawPointer; } |
907 | |
908 | protected: |
909 | void finalizeLLIntInlineCaches(); |
910 | #if ENABLE(JIT) |
911 | void finalizeBaselineJITInlineCaches(); |
912 | #endif |
913 | #if ENABLE(DFG_JIT) |
914 | void tallyFrequentExitSites(); |
915 | #else |
916 | void tallyFrequentExitSites() { } |
917 | #endif |
918 | |
919 | private: |
920 | friend class CodeBlockSet; |
921 | friend class ExecutableToCodeBlockEdge; |
922 | |
923 | BytecodeLivenessAnalysis& livenessAnalysisSlow(); |
924 | |
925 | CodeBlock* specialOSREntryBlockOrNull(); |
926 | |
927 | void noticeIncomingCall(CallFrame* callerFrame); |
928 | |
929 | double optimizationThresholdScalingFactor(); |
930 | |
931 | void updateAllValueProfilePredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles); |
932 | |
933 | void setConstantIdentifierSetRegisters(VM&, const Vector<ConstantIdentifierSetEntry>& constants); |
934 | |
935 | void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants, const Vector<SourceCodeRepresentation>& constantsSourceCodeRepresentation, ScriptExecutable* topLevelExecutable); |
936 | |
937 | void replaceConstant(int index, JSValue value) |
938 | { |
939 | ASSERT(isConstantRegisterIndex(index) && static_cast<size_t>(index - FirstConstantRegisterIndex) < m_constantRegisters.size()); |
940 | m_constantRegisters[index - FirstConstantRegisterIndex].set(*m_vm, this, value); |
941 | } |
942 | |
943 | bool shouldVisitStrongly(const ConcurrentJSLocker&); |
944 | bool shouldJettisonDueToWeakReference(VM&); |
945 | bool shouldJettisonDueToOldAge(const ConcurrentJSLocker&); |
946 | |
947 | void propagateTransitions(const ConcurrentJSLocker&, SlotVisitor&); |
948 | void determineLiveness(const ConcurrentJSLocker&, SlotVisitor&); |
949 | |
950 | void stronglyVisitStrongReferences(const ConcurrentJSLocker&, SlotVisitor&); |
951 | void stronglyVisitWeakReferences(const ConcurrentJSLocker&, SlotVisitor&); |
952 | void visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor&); |
953 | |
954 | unsigned numberOfNonArgumentValueProfiles() { return m_numberOfNonArgumentValueProfiles; } |
955 | unsigned totalNumberOfValueProfiles() { return numberOfArgumentValueProfiles() + numberOfNonArgumentValueProfiles(); } |
956 | ValueProfile* tryGetValueProfileForBytecodeIndex(BytecodeIndex); |
957 | |
958 | Seconds timeSinceCreation() |
959 | { |
960 | return MonotonicTime::now() - m_creationTime; |
961 | } |
962 | |
963 | void createRareDataIfNecessary() |
964 | { |
965 | if (!m_rareData) { |
966 | auto rareData = makeUnique<RareData>(); |
967 | WTF::storeStoreFence(); // m_catchProfiles can be touched from compiler threads. |
968 | m_rareData = WTFMove(rareData); |
969 | } |
970 | } |
971 | |
972 | void insertBasicBlockBoundariesForControlFlowProfiler(); |
973 | void ensureCatchLivenessIsComputedForBytecodeIndexSlow(const OpCatch&, BytecodeIndex); |
974 | |
975 | int m_numCalleeLocals; |
976 | int m_numVars; |
977 | int m_numParameters; |
978 | int m_numberOfArgumentsToSkip { 0 }; |
979 | unsigned m_numberOfNonArgumentValueProfiles { 0 }; |
980 | union { |
981 | unsigned m_debuggerRequests; |
982 | struct { |
983 | unsigned m_hasDebuggerStatement : 1; |
984 | unsigned m_steppingMode : 1; |
985 | unsigned m_numBreakpoints : 30; |
986 | }; |
987 | }; |
988 | unsigned m_bytecodeCost { 0 }; |
989 | VirtualRegister m_scopeRegister; |
990 | mutable CodeBlockHash m_hash; |
991 | |
992 | WriteBarrier<UnlinkedCodeBlock> m_unlinkedCode; |
993 | WriteBarrier<ScriptExecutable> m_ownerExecutable; |
994 | WriteBarrier<ExecutableToCodeBlockEdge> m_ownerEdge; |
995 | // m_vm must be a pointer (instead of a reference) because the JSCLLIntOffsetsExtractor |
996 | // cannot handle it being a reference. |
997 | VM* m_vm; |
998 | |
999 | const void* m_instructionsRawPointer { nullptr }; |
1000 | SentinelLinkedList<LLIntCallLinkInfo, PackedRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls; |
1001 | StructureWatchpointMap m_llintGetByIdWatchpointMap; |
1002 | RefPtr<JITCode> m_jitCode; |
1003 | #if ENABLE(JIT) |
1004 | std::unique_ptr<JITData> m_jitData; |
1005 | #endif |
1006 | #if ENABLE(DFG_JIT) |
1007 | // This is relevant to non-DFG code blocks that serve as the profiled code block |
1008 | // for DFG code blocks. |
1009 | CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles; |
1010 | #endif |
1011 | RefCountedArray<ValueProfile> m_argumentValueProfiles; |
1012 | |
1013 | // Constant Pool |
1014 | COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown); |
    // TODO: This could just be a pointer to m_unlinkedCode's data, but the DFG mutates
    // it, so we're stuck with it for now.
1017 | Vector<WriteBarrier<Unknown>> m_constantRegisters; |
1018 | Vector<SourceCodeRepresentation> m_constantsSourceCodeRepresentation; |
1019 | RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionDecls; |
1020 | RefCountedArray<WriteBarrier<FunctionExecutable>> m_functionExprs; |
1021 | |
1022 | WriteBarrier<CodeBlock> m_alternative; |
1023 | |
1024 | BaselineExecutionCounter m_llintExecuteCounter; |
1025 | |
1026 | BaselineExecutionCounter m_jitExecuteCounter; |
1027 | uint32_t m_osrExitCounter; |
1028 | |
1029 | uint16_t m_optimizationDelayCounter; |
1030 | uint16_t m_reoptimizationRetryCounter; |
1031 | |
1032 | RefPtr<MetadataTable> m_metadata; |
1033 | |
1034 | MonotonicTime m_creationTime; |
1035 | double m_previousCounter { 0 }; |
1036 | |
1037 | std::unique_ptr<RareData> m_rareData; |
1038 | }; |
1039 | |
1040 | template <typename ExecutableType> |
1041 | Exception* ScriptExecutable::prepareForExecution(VM& vm, JSFunction* function, JSScope* scope, CodeSpecializationKind kind, CodeBlock*& resultCodeBlock) |
1042 | { |
1043 | if (hasJITCodeFor(kind)) { |
1044 | if (std::is_same<ExecutableType, EvalExecutable>::value) |
1045 | resultCodeBlock = jsCast<CodeBlock*>(jsCast<EvalExecutable*>(this)->codeBlock()); |
1046 | else if (std::is_same<ExecutableType, ProgramExecutable>::value) |
1047 | resultCodeBlock = jsCast<CodeBlock*>(jsCast<ProgramExecutable*>(this)->codeBlock()); |
1048 | else if (std::is_same<ExecutableType, ModuleProgramExecutable>::value) |
1049 | resultCodeBlock = jsCast<CodeBlock*>(jsCast<ModuleProgramExecutable*>(this)->codeBlock()); |
1050 | else if (std::is_same<ExecutableType, FunctionExecutable>::value) |
1051 | resultCodeBlock = jsCast<CodeBlock*>(jsCast<FunctionExecutable*>(this)->codeBlockFor(kind)); |
1052 | else |
1053 | RELEASE_ASSERT_NOT_REACHED(); |
1054 | return nullptr; |
1055 | } |
1056 | return prepareForExecutionImpl(vm, function, scope, kind, resultCodeBlock); |
1057 | } |
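
// Illustrative sketch of a call site for the template above; `executable`,
// `function`, and `scope` are assumed to be in scope, and the specialization
// kind is shown as CodeForCall for illustration:
//
//     CodeBlock* codeBlock = nullptr;
//     Exception* error = executable->prepareForExecution<FunctionExecutable>(
//         vm, function, scope, CodeForCall, codeBlock);
//     if (error)
//         return error;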
1058 | |
1059 | #define CODEBLOCK_LOG_EVENT(codeBlock, summary, details) \ |
1060 | do { \ |
1061 | if (codeBlock) \ |
1062 | (codeBlock->vm().logEvent(codeBlock, summary, [&] () { return toCString details; })); \ |
1063 | } while (0) |
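
// Usage sketch for the macro above; the summary string and detail arguments
// are illustrative:
//
//     CODEBLOCK_LOG_EVENT(codeBlock, "jettison", ("due to ", reason));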
1064 | |
1065 | |
1066 | void setPrinter(Printer::PrintRecord&, CodeBlock*); |
1067 | |
1068 | } // namespace JSC |
1069 | |
1070 | namespace WTF { |
1071 | |
1072 | JS_EXPORT_PRIVATE void printInternal(PrintStream&, JSC::CodeBlock*); |
1073 | |
1074 | } // namespace WTF |
1075 | |