1 | /* |
2 | * Copyright (C) 2011-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | #include "DFGSpeculativeJIT.h" |
28 | |
29 | #if ENABLE(DFG_JIT) |
30 | |
31 | #include "ArrayPrototype.h" |
32 | #include "AtomicsObject.h" |
33 | #include "CallFrameShuffler.h" |
34 | #include "DFGAbstractInterpreterInlines.h" |
35 | #include "DFGCallArrayAllocatorSlowPathGenerator.h" |
36 | #include "DFGDoesGC.h" |
37 | #include "DFGOperations.h" |
38 | #include "DFGSlowPathGenerator.h" |
39 | #include "DateInstance.h" |
40 | #include "DirectArguments.h" |
41 | #include "GetterSetter.h" |
42 | #include "HasOwnPropertyCache.h" |
43 | #include "JSCInlines.h" |
44 | #include "JSLexicalEnvironment.h" |
45 | #include "JSMap.h" |
46 | #include "JSPropertyNameEnumerator.h" |
47 | #include "JSSet.h" |
48 | #include "ObjectPrototype.h" |
49 | #include "SetupVarargsFrame.h" |
50 | #include "SpillRegistersMode.h" |
51 | #include "StringPrototype.h" |
52 | #include "SuperSampler.h" |
53 | #include "Watchdog.h" |
54 | |
55 | namespace JSC { namespace DFG { |
56 | |
57 | #if USE(JSVALUE64) |
58 | |
// Boxes an Int52 value held in sourceGPR into targetGPR as a JSValue.
// |format| says whether sourceGPR currently holds the left-shifted
// (DataFormatInt52) or unshifted (DataFormatStrictInt52) representation.
// sourceGPR is preserved: if it is temporarily unshifted here, it is
// re-shifted before returning (unless it aliases targetGPR).
void SpeculativeJIT::boxInt52(GPRReg sourceGPR, GPRReg targetGPR, DataFormat format)
{
    // boxInt52 needs a scratch register distinct from both source and target,
    // so allocate one only when the two alias.
    GPRReg tempGPR;
    if (sourceGPR == targetGPR)
        tempGPR = allocate();
    else
        tempGPR = targetGPR;

    FPRReg fpr = fprAllocate();

    // DataFormatInt52 keeps the value shifted left by int52ShiftAmount;
    // undo the shift in place so we box the strict (unshifted) integer.
    if (format == DataFormatInt52)
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), sourceGPR);
    else
        ASSERT(format == DataFormatStrictInt52);

    m_jit.boxInt52(sourceGPR, targetGPR, tempGPR, fpr);

    // Restore sourceGPR's shifted representation if we unshifted it above and
    // it was not clobbered by the boxed result anyway.
    if (format == DataFormatInt52 && sourceGPR != targetGPR)
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), sourceGPR);

    if (tempGPR != targetGPR)
        unlock(tempGPR);

    unlock(fpr);
}
84 | |
// Materializes the value of |edge| into a GPR as a boxed JSValue and returns
// that (locked) register. Depending on the value's current format this may
// load a constant, refill from the stack, or box an unboxed int32 in place.
GPRReg SpeculativeJIT::fillJSValue(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        // Not currently in a register: either materialize a constant or
        // reload the spilled value from its stack slot.
        GPRReg gpr = allocate();

        if (edge->hasConstant()) {
            JSValue jsValue = edge->asJSValue();
            m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
            info.fillJSValue(*m_stream, gpr, DataFormatJS);
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
        } else {
            DataFormat spillFormat = info.spillFormat();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            switch (spillFormat) {
            case DataFormatInt32: {
                // Spilled as a raw 32-bit int: load the payload and box it
                // by or'ing in the number tag.
                m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
                m_jit.or64(GPRInfo::numberTagRegister, gpr);
                spillFormat = DataFormatJSInt32;
                break;
            }

            default:
                // Anything else must already be spilled as a full JSValue.
                m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
                DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat & DataFormatJS, spillFormat);
                break;
            }
            info.fillJSValue(*m_stream, gpr, spillFormat);
        }
        return gpr;
    }

    case DataFormatInt32: {
        GPRReg gpr = info.gpr();
        // If the register has already been locked we need to take a copy.
        // If not, we box in place and update the info to DataFormatJSInt32.
        if (m_gprs.isLocked(gpr)) {
            GPRReg result = allocate();
            m_jit.or64(GPRInfo::numberTagRegister, gpr, result);
            return result;
        }
        m_gprs.lock(gpr);
        m_jit.or64(GPRInfo::numberTagRegister, gpr);
        info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
        return gpr;
    }

    case DataFormatCell:
        // No retag required on JSVALUE64!
    case DataFormatJS:
    case DataFormatJSInt32:
    case DataFormatJSDouble:
    case DataFormatJSCell:
    case DataFormatJSBoolean: {
        // Already a valid boxed JSValue representation; just lock and return.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    case DataFormatBoolean:
    case DataFormatStorage:
    case DataFormatDouble:
    case DataFormatInt52:
        // this type currently never occurs
        DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format" );

    default:
        DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format" );
        return InvalidGPRReg;
    }
}
159 | |
160 | void SpeculativeJIT::cachedGetById(CodeOrigin origin, JSValueRegs base, JSValueRegs result, unsigned identifierNumber, JITCompiler::Jump slowPathTarget , SpillRegistersMode mode, AccessType type) |
161 | { |
162 | cachedGetById(origin, base.gpr(), result.gpr(), identifierNumber, slowPathTarget, mode, type); |
163 | } |
164 | |
// Emits an inline-cached get_by_id: a fast path generated by
// JITGetByIdGenerator, plus a slow-path call to the optimizing operation
// selected by |type|. |slowPathTarget| (if valid) is an extra entry into the
// slow path provided by the caller.
void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode, AccessType type)
{
    CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
    RegisterSet usedRegisters = this->usedRegisters();
    if (spillMode == DontSpill) {
        // We've already flushed registers to the stack, we don't need to spill these.
        usedRegisters.set(baseGPR, false);
        usedRegisters.set(resultGPR, false);
    }
    JITGetByIdGenerator gen(
        m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber),
        JSValueRegs(baseGPR), JSValueRegs(resultGPR), type);
    gen.generateFastPath(m_jit);

    // The slow path is entered both from the caller-provided jump (if any)
    // and from the IC's own slow-path jump.
    JITCompiler::JumpList slowCases;
    slowCases.append(slowPathTarget);
    slowCases.append(gen.slowPathJump());

    std::unique_ptr<SlowPathGenerator> slowPath = slowPathCall(
        slowCases, this, appropriateOptimizingGetByIdFunction(type),
        spillMode, ExceptionCheckRequirement::CheckNeeded,
        resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), gen.stubInfo(), baseGPR, identifierUID(identifierNumber));

    // Register the IC and its slow path so they get finalized at link time.
    m_jit.addGetById(gen, slowPath.get());
    addSlowPathGenerator(WTFMove(slowPath));
}
191 | |
// Emits an inline-cached get_by_id_with_this (property load with an explicit
// |this| value, as used by super property access). Fast path via
// JITGetByIdWithThisGenerator, slow path via operationGetByIdWithThisOptimize.
// Callers must have already flushed registers (DontSpill is hard-coded below).
void SpeculativeJIT::cachedGetByIdWithThis(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg thisGPR, GPRReg resultGPR, unsigned identifierNumber, const JITCompiler::JumpList& slowPathTarget)
{
    CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
    RegisterSet usedRegisters = this->usedRegisters();
    // We've already flushed registers to the stack, we don't need to spill these.
    usedRegisters.set(baseGPR, false);
    usedRegisters.set(thisGPR, false);
    usedRegisters.set(resultGPR, false);

    JITGetByIdWithThisGenerator gen(
        m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber),
        JSValueRegs(resultGPR), JSValueRegs(baseGPR), JSValueRegs(thisGPR));
    gen.generateFastPath(m_jit);

    // Slow path is entered from the caller-provided jumps and from the IC's
    // own slow-path jump.
    JITCompiler::JumpList slowCases;
    slowCases.append(slowPathTarget);
    slowCases.append(gen.slowPathJump());

    std::unique_ptr<SlowPathGenerator> slowPath = slowPathCall(
        slowCases, this, operationGetByIdWithThisOptimize,
        DontSpill, ExceptionCheckRequirement::CheckNeeded,
        resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), gen.stubInfo(), baseGPR, thisGPR, identifierUID(identifierNumber));

    m_jit.addGetByIdWithThis(gen, slowPath.get());
    addSlowPathGenerator(WTFMove(slowPath));
}
218 | |
// Produces a JS boolean answering "operand == null" (loose equality with
// null/undefined), without speculation. Handles the MasqueradesAsUndefined
// case where certain cells compare equal to undefined when observed from
// their own global object.
void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined(Edge operand)
{
    JSValueOperand arg(this, operand, ManualOperandSpeculation);
    GPRReg argGPR = arg.gpr();

    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();

    // Default answer is false; the paths below overwrite or skip as needed.
    m_jit.move(TrustedImm32(0), resultGPR);

    JITCompiler::JumpList done;
    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        // No masquerading cells exist, so any cell compares false: jump
        // straight to done with resultGPR still 0.
        if (!isKnownNotCell(operand.node()))
            done.append(m_jit.branchIfCell(JSValueRegs(argGPR)));
    } else {
        GPRTemporary localGlobalObject(this);
        GPRTemporary remoteGlobalObject(this);
        GPRTemporary scratch(this);

        JITCompiler::Jump notCell;
        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchIfNotCell(JSValueRegs(argGPR));

        // A cell without the MasqueradesAsUndefined flag compares false.
        JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(
            JITCompiler::Zero,
            JITCompiler::Address(argGPR, JSCell::typeInfoFlagsOffset()),
            JITCompiler::TrustedImm32(MasqueradesAsUndefined));
        done.append(isNotMasqueradesAsUndefined);

        // A masquerading cell compares true only when its structure's global
        // object matches the global object of the current code origin.
        GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
        GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
        m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
        m_jit.emitLoadStructure(vm(), argGPR, resultGPR, scratch.gpr());
        m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
        m_jit.comparePtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultGPR);
        done.append(m_jit.jump());
        if (!isKnownCell(operand.node()))
            notCell.link(&m_jit);
    }

    if (!isKnownNotOther(operand.node())) {
        // Non-cell path: mask off the undefined-tag bit so both null and
        // undefined normalize to ValueNull, then compare against it.
        m_jit.move(argGPR, resultGPR);
        m_jit.and64(JITCompiler::TrustedImm32(~JSValue::UndefinedTag), resultGPR);
        m_jit.compare64(JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(JSValue::ValueNull), resultGPR);
    }

    done.link(&m_jit);

    // Box the 0/1 result as a JS boolean.
    m_jit.or32(TrustedImm32(JSValue::ValueFalse), resultGPR);
    jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
}
270 | |
// Fused compare-to-null/undefined + branch (peephole form of the comparison
// above): instead of materializing a boolean, branches directly to the taken
// or notTaken block of |branchNode|.
void SpeculativeJIT::nonSpeculativePeepholeBranchNullOrUndefined(Edge operand, Node* branchNode)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    JSValueOperand arg(this, operand, ManualOperandSpeculation);
    GPRReg argGPR = arg.gpr();

    GPRTemporary result(this, Reuse, arg);
    GPRReg resultGPR = result.gpr();

    // First, handle the case where "operand" is a cell.
    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        // No masquerading cells exist, so any cell compares false.
        if (!isKnownNotCell(operand.node())) {
            JITCompiler::Jump isCell = m_jit.branchIfCell(JSValueRegs(argGPR));
            addBranch(isCell, notTaken);
        }
    } else {
        GPRTemporary localGlobalObject(this);
        GPRTemporary remoteGlobalObject(this);
        GPRTemporary scratch(this);

        JITCompiler::Jump notCell;
        if (!isKnownCell(operand.node()))
            notCell = m_jit.branchIfNotCell(JSValueRegs(argGPR));

        // Cells without the MasqueradesAsUndefined flag compare false.
        branchTest8(JITCompiler::Zero,
            JITCompiler::Address(argGPR, JSCell::typeInfoFlagsOffset()),
            JITCompiler::TrustedImm32(MasqueradesAsUndefined), notTaken);

        // Masquerading cells compare true only when their structure's global
        // object matches the current code origin's global object.
        GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
        GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
        m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
        m_jit.emitLoadStructure(vm(), argGPR, resultGPR, scratch.gpr());
        m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
        branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, taken);

        if (!isKnownCell(operand.node())) {
            jump(notTaken, ForceJump);
            notCell.link(&m_jit);
        }
    }

    if (isKnownNotOther(operand.node()))
        jump(notTaken);
    else {
        JITCompiler::RelationalCondition condition = JITCompiler::Equal;
        // If taken is the fall-through block, invert the condition and swap
        // the targets so we can fall through instead of jumping.
        if (taken == nextBlock()) {
            condition = JITCompiler::NotEqual;
            std::swap(taken, notTaken);
        }
        // Mask off the undefined-tag bit so null and undefined both
        // normalize to ValueNull, then branch on the comparison.
        m_jit.move(argGPR, resultGPR);
        m_jit.and64(JITCompiler::TrustedImm32(~JSValue::UndefinedTag), resultGPR);
        branch64(condition, resultGPR, JITCompiler::TrustedImm64(JSValue::ValueNull), taken);
        jump(notTaken);
    }
}
328 | |
329 | void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert) |
330 | { |
331 | BasicBlock* taken = branchNode->branchData()->taken.block; |
332 | BasicBlock* notTaken = branchNode->branchData()->notTaken.block; |
333 | |
334 | // The branch instruction will branch to the taken block. |
335 | // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through. |
336 | if (taken == nextBlock()) { |
337 | invert = !invert; |
338 | BasicBlock* tmp = taken; |
339 | taken = notTaken; |
340 | notTaken = tmp; |
341 | } |
342 | |
343 | JSValueOperand arg1(this, node->child1()); |
344 | JSValueOperand arg2(this, node->child2()); |
345 | GPRReg arg1GPR = arg1.gpr(); |
346 | GPRReg arg2GPR = arg2.gpr(); |
347 | |
348 | GPRTemporary result(this); |
349 | GPRReg resultGPR = result.gpr(); |
350 | |
351 | arg1.use(); |
352 | arg2.use(); |
353 | |
354 | if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) { |
355 | // see if we get lucky: if the arguments are cells and they reference the same |
356 | // cell, then they must be strictly equal. |
357 | branch64(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken); |
358 | |
359 | silentSpillAllRegisters(resultGPR); |
360 | callOperation(operationCompareStrictEqCell, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), arg1GPR, arg2GPR); |
361 | silentFillAllRegisters(); |
362 | m_jit.exceptionCheck(); |
363 | |
364 | branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken); |
365 | } else { |
366 | m_jit.or64(arg1GPR, arg2GPR, resultGPR); |
367 | |
368 | JITCompiler::Jump twoCellsCase = m_jit.branchIfCell(resultGPR); |
369 | |
370 | JITCompiler::Jump leftOK = m_jit.branchIfInt32(arg1GPR); |
371 | JITCompiler::Jump leftDouble = m_jit.branchIfNumber(arg1GPR); |
372 | leftOK.link(&m_jit); |
373 | JITCompiler::Jump rightOK = m_jit.branchIfInt32(arg2GPR); |
374 | JITCompiler::Jump rightDouble = m_jit.branchIfNumber(arg2GPR); |
375 | rightOK.link(&m_jit); |
376 | |
377 | branch64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, taken); |
378 | jump(notTaken, ForceJump); |
379 | |
380 | twoCellsCase.link(&m_jit); |
381 | branch64(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken); |
382 | |
383 | leftDouble.link(&m_jit); |
384 | rightDouble.link(&m_jit); |
385 | |
386 | silentSpillAllRegisters(resultGPR); |
387 | callOperation(operationCompareStrictEq, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), arg1GPR, arg2GPR); |
388 | silentFillAllRegisters(); |
389 | m_jit.exceptionCheck(); |
390 | |
391 | branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken); |
392 | } |
393 | |
394 | jump(notTaken); |
395 | } |
396 | |
// Non-fused strict-equality (===) comparison, without speculation: produces
// a boolean result register instead of branching. When |invert| is true the
// result is negated. Mirrors the fast/slow-path structure of the peephole
// variant above.
void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert)
{
    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    JSValueRegs arg1Regs = arg1.jsValueRegs();
    JSValueRegs arg2Regs = arg2.jsValueRegs();

    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();

    arg1.use();
    arg2.use();

    if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
        // see if we get lucky: if the arguments are cells and they reference the same
        // cell, then they must be strictly equal.
        // FIXME: this should flush registers instead of silent spill/fill.
        JITCompiler::Jump notEqualCase = m_jit.branch64(JITCompiler::NotEqual, arg1Regs.gpr(), arg2Regs.gpr());

        m_jit.move(JITCompiler::TrustedImm64(!invert), resultGPR);

        JITCompiler::Jump done = m_jit.jump();

        notEqualCase.link(&m_jit);

        // Distinct cells: defer to the runtime for the answer.
        silentSpillAllRegisters(resultGPR);
        callOperation(operationCompareStrictEqCell, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), arg1Regs, arg2Regs);
        silentFillAllRegisters();
        m_jit.exceptionCheck();

        done.link(&m_jit);
        unblessedBooleanResult(resultGPR, m_currentNode, UseChildrenCalledExplicitly);
        return;
    }

    m_jit.or64(arg1Regs.gpr(), arg2Regs.gpr(), resultGPR);

    JITCompiler::JumpList slowPathCases;

    JITCompiler::Jump twoCellsCase = m_jit.branchIfCell(resultGPR);

    // Fast path requires both operands to be int32; any double operand goes
    // to the slow path.
    JITCompiler::Jump leftOK = m_jit.branchIfInt32(arg1Regs);
    slowPathCases.append(m_jit.branchIfNumber(arg1Regs, InvalidGPRReg));
    leftOK.link(&m_jit);
    JITCompiler::Jump rightOK = m_jit.branchIfInt32(arg2Regs);
    slowPathCases.append(m_jit.branchIfNumber(arg2Regs, InvalidGPRReg));
    rightOK.link(&m_jit);

    // Both int32 (or other non-number, non-cell immediates): the boxed
    // 64-bit compare decides strict equality.
    m_jit.compare64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1Regs.gpr(), arg2Regs.gpr(), resultGPR);

    JITCompiler::Jump done = m_jit.jump();

    // Both cells: identical cells are strictly equal; distinct cells take
    // the slow path.
    twoCellsCase.link(&m_jit);
    slowPathCases.append(m_jit.branch64(JITCompiler::NotEqual, arg1Regs.gpr(), arg2Regs.gpr()));

    m_jit.move(JITCompiler::TrustedImm64(!invert), resultGPR);

    addSlowPathGenerator(slowPathCall(slowPathCases, this, operationCompareStrictEq, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), arg1Regs, arg2Regs));

    done.link(&m_jit);

    unblessedBooleanResult(resultGPR, m_currentNode, UseChildrenCalledExplicitly);
}
460 | |
461 | void SpeculativeJIT::emitCall(Node* node) |
462 | { |
463 | CallLinkInfo::CallType callType; |
464 | bool isVarargs = false; |
465 | bool isForwardVarargs = false; |
466 | bool isTail = false; |
467 | bool isEmulatedTail = false; |
468 | bool isDirect = false; |
469 | switch (node->op()) { |
470 | case Call: |
471 | case CallEval: |
472 | callType = CallLinkInfo::Call; |
473 | break; |
474 | case TailCall: |
475 | callType = CallLinkInfo::TailCall; |
476 | isTail = true; |
477 | break; |
478 | case TailCallInlinedCaller: |
479 | callType = CallLinkInfo::Call; |
480 | isEmulatedTail = true; |
481 | break; |
482 | case Construct: |
483 | callType = CallLinkInfo::Construct; |
484 | break; |
485 | case CallVarargs: |
486 | callType = CallLinkInfo::CallVarargs; |
487 | isVarargs = true; |
488 | break; |
489 | case TailCallVarargs: |
490 | callType = CallLinkInfo::TailCallVarargs; |
491 | isVarargs = true; |
492 | isTail = true; |
493 | break; |
494 | case TailCallVarargsInlinedCaller: |
495 | callType = CallLinkInfo::CallVarargs; |
496 | isVarargs = true; |
497 | isEmulatedTail = true; |
498 | break; |
499 | case ConstructVarargs: |
500 | callType = CallLinkInfo::ConstructVarargs; |
501 | isVarargs = true; |
502 | break; |
503 | case CallForwardVarargs: |
504 | callType = CallLinkInfo::CallVarargs; |
505 | isForwardVarargs = true; |
506 | break; |
507 | case ConstructForwardVarargs: |
508 | callType = CallLinkInfo::ConstructVarargs; |
509 | isForwardVarargs = true; |
510 | break; |
511 | case TailCallForwardVarargs: |
512 | callType = CallLinkInfo::TailCallVarargs; |
513 | isTail = true; |
514 | isForwardVarargs = true; |
515 | break; |
516 | case TailCallForwardVarargsInlinedCaller: |
517 | callType = CallLinkInfo::CallVarargs; |
518 | isEmulatedTail = true; |
519 | isForwardVarargs = true; |
520 | break; |
521 | case DirectCall: |
522 | callType = CallLinkInfo::DirectCall; |
523 | isDirect = true; |
524 | break; |
525 | case DirectConstruct: |
526 | callType = CallLinkInfo::DirectConstruct; |
527 | isDirect = true; |
528 | break; |
529 | case DirectTailCall: |
530 | callType = CallLinkInfo::DirectTailCall; |
531 | isTail = true; |
532 | isDirect = true; |
533 | break; |
534 | case DirectTailCallInlinedCaller: |
535 | callType = CallLinkInfo::DirectCall; |
536 | isEmulatedTail = true; |
537 | isDirect = true; |
538 | break; |
539 | default: |
540 | DFG_CRASH(m_jit.graph(), node, "bad node type" ); |
541 | break; |
542 | } |
543 | |
544 | GPRReg calleeGPR = InvalidGPRReg; |
545 | CallFrameShuffleData shuffleData; |
546 | |
547 | JSGlobalObject* globalObject = m_graph.globalObjectFor(node->origin.semantic); |
548 | ExecutableBase* executable = nullptr; |
549 | FunctionExecutable* functionExecutable = nullptr; |
550 | if (isDirect) { |
551 | executable = node->castOperand<ExecutableBase*>(); |
552 | functionExecutable = jsDynamicCast<FunctionExecutable*>(vm(), executable); |
553 | } |
554 | |
555 | unsigned numPassedArgs = 0; |
556 | unsigned numAllocatedArgs = 0; |
557 | |
558 | // Gotta load the arguments somehow. Varargs is trickier. |
559 | if (isVarargs || isForwardVarargs) { |
560 | RELEASE_ASSERT(!isDirect); |
561 | CallVarargsData* data = node->callVarargsData(); |
562 | |
563 | int numUsedStackSlots = m_jit.graph().m_nextMachineLocal; |
564 | |
565 | if (isForwardVarargs) { |
566 | flushRegisters(); |
567 | if (node->child3()) |
568 | use(node->child3()); |
569 | |
570 | GPRReg scratchGPR1; |
571 | GPRReg scratchGPR2; |
572 | GPRReg scratchGPR3; |
573 | |
574 | scratchGPR1 = JITCompiler::selectScratchGPR(); |
575 | scratchGPR2 = JITCompiler::selectScratchGPR(scratchGPR1); |
576 | scratchGPR3 = JITCompiler::selectScratchGPR(scratchGPR1, scratchGPR2); |
577 | |
578 | m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR2); |
579 | JITCompiler::JumpList slowCase; |
580 | InlineCallFrame* inlineCallFrame; |
581 | if (node->child3()) |
582 | inlineCallFrame = node->child3()->origin.semantic.inlineCallFrame(); |
583 | else |
584 | inlineCallFrame = node->origin.semantic.inlineCallFrame(); |
585 | // emitSetupVarargsFrameFastCase modifies the stack pointer if it succeeds. |
586 | emitSetupVarargsFrameFastCase(vm(), m_jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase); |
587 | JITCompiler::Jump done = m_jit.jump(); |
588 | slowCase.link(&m_jit); |
589 | callOperation(operationThrowStackOverflowForVarargs, TrustedImmPtr::weakPointer(m_graph, globalObject)); |
590 | m_jit.exceptionCheck(); |
591 | m_jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow); |
592 | done.link(&m_jit); |
593 | } else { |
594 | GPRReg argumentsGPR; |
595 | GPRReg scratchGPR1; |
596 | GPRReg scratchGPR2; |
597 | GPRReg scratchGPR3; |
598 | |
599 | auto loadArgumentsGPR = [&] (GPRReg reservedGPR) { |
600 | if (reservedGPR != InvalidGPRReg) |
601 | lock(reservedGPR); |
602 | JSValueOperand arguments(this, node->child3()); |
603 | argumentsGPR = arguments.gpr(); |
604 | if (reservedGPR != InvalidGPRReg) |
605 | unlock(reservedGPR); |
606 | flushRegisters(); |
607 | |
608 | scratchGPR1 = JITCompiler::selectScratchGPR(argumentsGPR, reservedGPR); |
609 | scratchGPR2 = JITCompiler::selectScratchGPR(argumentsGPR, scratchGPR1, reservedGPR); |
610 | scratchGPR3 = JITCompiler::selectScratchGPR(argumentsGPR, scratchGPR1, scratchGPR2, reservedGPR); |
611 | }; |
612 | |
613 | loadArgumentsGPR(InvalidGPRReg); |
614 | |
615 | DFG_ASSERT(m_jit.graph(), node, isFlushed()); |
616 | |
617 | // Right now, arguments is in argumentsGPR and the register file is flushed. |
618 | callOperation(operationSizeFrameForVarargs, GPRInfo::returnValueGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), argumentsGPR, numUsedStackSlots, data->firstVarArgOffset); |
619 | m_jit.exceptionCheck(); |
620 | |
621 | // Now we have the argument count of the callee frame, but we've lost the arguments operand. |
622 | // Reconstruct the arguments operand while preserving the callee frame. |
623 | loadArgumentsGPR(GPRInfo::returnValueGPR); |
624 | m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR1); |
625 | emitSetVarargsFrame(m_jit, GPRInfo::returnValueGPR, false, scratchGPR1, scratchGPR1); |
626 | m_jit.addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), scratchGPR1, JITCompiler::stackPointerRegister); |
627 | |
628 | callOperation(operationSetupVarargsFrame, GPRInfo::returnValueGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), scratchGPR1, argumentsGPR, data->firstVarArgOffset, GPRInfo::returnValueGPR); |
629 | m_jit.exceptionCheck(); |
630 | m_jit.addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::returnValueGPR, JITCompiler::stackPointerRegister); |
631 | } |
632 | |
633 | DFG_ASSERT(m_jit.graph(), node, isFlushed()); |
634 | |
635 | // We don't need the arguments array anymore. |
636 | if (isVarargs) |
637 | use(node->child3()); |
638 | |
639 | // Now set up the "this" argument. |
640 | JSValueOperand thisArgument(this, node->child2()); |
641 | GPRReg thisArgumentGPR = thisArgument.gpr(); |
642 | thisArgument.use(); |
643 | |
644 | m_jit.store64(thisArgumentGPR, JITCompiler::calleeArgumentSlot(0)); |
645 | } else { |
646 | // The call instruction's first child is the function; the subsequent children are the |
647 | // arguments. |
648 | numPassedArgs = node->numChildren() - 1; |
649 | numAllocatedArgs = numPassedArgs; |
650 | |
651 | if (functionExecutable) { |
652 | // Allocate more args if this would let us avoid arity checks. This is throttled by |
653 | // CallLinkInfo's limit. It's probably good to throttle it - if the callee wants a |
654 | // ginormous amount of argument space then it's better for them to do it so that when we |
655 | // make calls to other things, we don't waste space. |
656 | unsigned desiredNumAllocatedArgs = static_cast<unsigned>(functionExecutable->parameterCount()) + 1; |
657 | if (desiredNumAllocatedArgs <= Options::maximumDirectCallStackSize()) { |
658 | numAllocatedArgs = std::max(numAllocatedArgs, desiredNumAllocatedArgs); |
659 | |
660 | // Whoever converts to DirectCall should do this adjustment. It's too late for us to |
661 | // do this adjustment now since we will have already emitted code that relied on the |
662 | // value of m_parameterSlots. |
663 | DFG_ASSERT( |
664 | m_jit.graph(), node, |
665 | Graph::parameterSlotsForArgCount(numAllocatedArgs) |
666 | <= m_jit.graph().m_parameterSlots); |
667 | } |
668 | } |
669 | |
670 | if (isTail) { |
671 | Edge calleeEdge = m_jit.graph().child(node, 0); |
672 | JSValueOperand callee(this, calleeEdge); |
673 | calleeGPR = callee.gpr(); |
674 | if (!isDirect) |
675 | callee.use(); |
676 | |
677 | shuffleData.numberTagRegister = GPRInfo::numberTagRegister; |
678 | shuffleData.numLocals = m_jit.graph().frameRegisterCount(); |
679 | shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatJS); |
680 | shuffleData.args.resize(numAllocatedArgs); |
681 | shuffleData.numPassedArgs = numPassedArgs; |
682 | |
683 | for (unsigned i = 0; i < numPassedArgs; ++i) { |
684 | Edge argEdge = m_jit.graph().varArgChild(node, i + 1); |
685 | GenerationInfo& info = generationInfo(argEdge.node()); |
686 | if (!isDirect) |
687 | use(argEdge); |
688 | shuffleData.args[i] = info.recovery(argEdge->virtualRegister()); |
689 | } |
690 | |
691 | for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i) |
692 | shuffleData.args[i] = ValueRecovery::constant(jsUndefined()); |
693 | |
694 | shuffleData.setupCalleeSaveRegisters(m_jit.codeBlock()); |
695 | } else { |
696 | m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), JITCompiler::calleeFramePayloadSlot(CallFrameSlot::argumentCount)); |
697 | |
698 | for (unsigned i = 0; i < numPassedArgs; i++) { |
699 | Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i]; |
700 | JSValueOperand arg(this, argEdge); |
701 | GPRReg argGPR = arg.gpr(); |
702 | use(argEdge); |
703 | |
704 | m_jit.store64(argGPR, JITCompiler::calleeArgumentSlot(i)); |
705 | } |
706 | |
707 | for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i) |
708 | m_jit.storeTrustedValue(jsUndefined(), JITCompiler::calleeArgumentSlot(i)); |
709 | } |
710 | } |
711 | |
712 | if (!isTail || isVarargs || isForwardVarargs) { |
713 | Edge calleeEdge = m_jit.graph().child(node, 0); |
714 | JSValueOperand callee(this, calleeEdge); |
715 | calleeGPR = callee.gpr(); |
716 | callee.use(); |
717 | m_jit.store64(calleeGPR, JITCompiler::calleeFrameSlot(CallFrameSlot::callee)); |
718 | |
719 | flushRegisters(); |
720 | } |
721 | |
722 | CodeOrigin staticOrigin = node->origin.semantic; |
723 | InlineCallFrame* staticInlineCallFrame = staticOrigin.inlineCallFrame(); |
724 | ASSERT(!isTail || !staticInlineCallFrame || !staticInlineCallFrame->getCallerSkippingTailCalls()); |
725 | ASSERT(!isEmulatedTail || (staticInlineCallFrame && staticInlineCallFrame->getCallerSkippingTailCalls())); |
726 | CodeOrigin dynamicOrigin = |
727 | isEmulatedTail ? *staticInlineCallFrame->getCallerSkippingTailCalls() : staticOrigin; |
728 | |
729 | CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(dynamicOrigin, m_stream->size()); |
730 | |
731 | auto setResultAndResetStack = [&] () { |
732 | GPRFlushedCallResult result(this); |
733 | GPRReg resultGPR = result.gpr(); |
734 | m_jit.move(GPRInfo::returnValueGPR, resultGPR); |
735 | |
736 | jsValueResult(resultGPR, m_currentNode, DataFormatJS, UseChildrenCalledExplicitly); |
737 | |
738 | // After the calls are done, we need to reestablish our stack |
739 | // pointer. We rely on this for varargs calls, calls with arity |
740 | // mismatch (the callframe is slided) and tail calls. |
741 | m_jit.addPtr(TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, JITCompiler::stackPointerRegister); |
742 | }; |
743 | |
744 | CallLinkInfo* callLinkInfo = m_jit.codeBlock()->addCallLinkInfo(); |
745 | callLinkInfo->setUpCall(callType, m_currentNode->origin.semantic, calleeGPR); |
746 | |
747 | if (node->op() == CallEval) { |
748 | // We want to call operationCallEval but we don't want to overwrite the parameter area in |
749 | // which we have created a prototypical eval call frame. This means that we have to |
750 | // subtract stack to make room for the call. Lucky for us, at this point we have the whole |
751 | // register file to ourselves. |
752 | |
753 | m_jit.emitStoreCallSiteIndex(callSite); |
754 | m_jit.addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), JITCompiler::stackPointerRegister, GPRInfo::regT0); |
755 | m_jit.storePtr(GPRInfo::callFrameRegister, JITCompiler::Address(GPRInfo::regT0, CallFrame::callerFrameOffset())); |
756 | |
757 | // Now we need to make room for: |
758 | // - The caller frame and PC of a call to operationCallEval. |
759 | // - Potentially two arguments on the stack. |
760 | unsigned requiredBytes = sizeof(CallerFrameAndPC) + sizeof(CallFrame*) * 2; |
761 | requiredBytes = WTF::roundUpToMultipleOf(stackAlignmentBytes(), requiredBytes); |
762 | m_jit.subPtr(TrustedImm32(requiredBytes), JITCompiler::stackPointerRegister); |
763 | m_jit.setupArguments<decltype(operationCallEval)>(TrustedImmPtr::weakPointer(m_graph, globalObject), GPRInfo::regT0); |
764 | prepareForExternalCall(); |
765 | m_jit.appendCall(operationCallEval); |
766 | m_jit.exceptionCheck(); |
767 | JITCompiler::Jump done = m_jit.branchIfNotEmpty(GPRInfo::returnValueGPR); |
768 | |
769 | // This is the part where we meant to make a normal call. Oops. |
770 | m_jit.addPtr(TrustedImm32(requiredBytes), JITCompiler::stackPointerRegister); |
771 | m_jit.load64(JITCompiler::calleeFrameSlot(CallFrameSlot::callee), GPRInfo::regT0); |
772 | m_jit.emitDumbVirtualCall(vm(), globalObject, callLinkInfo); |
773 | |
774 | done.link(&m_jit); |
775 | setResultAndResetStack(); |
776 | return; |
777 | } |
778 | |
779 | if (isDirect) { |
780 | callLinkInfo->setExecutableDuringCompilation(executable); |
781 | callLinkInfo->setMaxArgumentCountIncludingThis(numAllocatedArgs); |
782 | |
783 | if (isTail) { |
784 | RELEASE_ASSERT(node->op() == DirectTailCall); |
785 | |
786 | JITCompiler::PatchableJump patchableJump = m_jit.patchableJump(); |
787 | JITCompiler::Label mainPath = m_jit.label(); |
788 | |
789 | m_jit.emitStoreCallSiteIndex(callSite); |
790 | |
791 | callLinkInfo->setFrameShuffleData(shuffleData); |
792 | CallFrameShuffler(m_jit, shuffleData).prepareForTailCall(); |
793 | |
794 | JITCompiler::Call call = m_jit.nearTailCall(); |
795 | |
796 | JITCompiler::Label slowPath = m_jit.label(); |
797 | patchableJump.m_jump.linkTo(slowPath, &m_jit); |
798 | |
799 | silentSpillAllRegisters(InvalidGPRReg); |
800 | callOperation(operationLinkDirectCall, callLinkInfo, calleeGPR); |
801 | silentFillAllRegisters(); |
802 | m_jit.exceptionCheck(); |
803 | m_jit.jump().linkTo(mainPath, &m_jit); |
804 | |
805 | useChildren(node); |
806 | |
807 | m_jit.addJSDirectTailCall(patchableJump, call, slowPath, callLinkInfo); |
808 | return; |
809 | } |
810 | |
811 | JITCompiler::Label mainPath = m_jit.label(); |
812 | |
813 | m_jit.emitStoreCallSiteIndex(callSite); |
814 | |
815 | JITCompiler::Call call = m_jit.nearCall(); |
816 | JITCompiler::Jump done = m_jit.jump(); |
817 | |
818 | JITCompiler::Label slowPath = m_jit.label(); |
819 | if (isX86()) |
820 | m_jit.pop(JITCompiler::selectScratchGPR(calleeGPR)); |
821 | |
822 | callOperation(operationLinkDirectCall, callLinkInfo, calleeGPR); |
823 | m_jit.exceptionCheck(); |
824 | m_jit.jump().linkTo(mainPath, &m_jit); |
825 | |
826 | done.link(&m_jit); |
827 | |
828 | setResultAndResetStack(); |
829 | |
830 | m_jit.addJSDirectCall(call, slowPath, callLinkInfo); |
831 | return; |
832 | } |
833 | |
834 | m_jit.emitStoreCallSiteIndex(callSite); |
835 | |
836 | JITCompiler::DataLabelPtr targetToCheck; |
837 | JITCompiler::Jump slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, TrustedImmPtr(nullptr)); |
838 | |
839 | if (isTail) { |
840 | if (node->op() == TailCall) { |
841 | callLinkInfo->setFrameShuffleData(shuffleData); |
842 | CallFrameShuffler(m_jit, shuffleData).prepareForTailCall(); |
843 | } else { |
844 | m_jit.emitRestoreCalleeSaves(); |
845 | m_jit.prepareForTailCallSlow(); |
846 | } |
847 | } |
848 | |
849 | JITCompiler::Call fastCall = isTail ? m_jit.nearTailCall() : m_jit.nearCall(); |
850 | |
851 | JITCompiler::Jump done = m_jit.jump(); |
852 | |
853 | slowPath.link(&m_jit); |
854 | |
855 | if (node->op() == TailCall) { |
856 | CallFrameShuffler callFrameShuffler(m_jit, shuffleData); |
857 | callFrameShuffler.setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0)); |
858 | callFrameShuffler.prepareForSlowPath(); |
859 | } else { |
860 | m_jit.move(calleeGPR, GPRInfo::regT0); // Callee needs to be in regT0 |
861 | |
862 | if (isTail) |
863 | m_jit.emitRestoreCalleeSaves(); // This needs to happen after we moved calleeGPR to regT0 |
864 | } |
865 | |
866 | m_jit.move(TrustedImmPtr(callLinkInfo), GPRInfo::regT2); // Link info needs to be in regT2 |
867 | m_jit.move(TrustedImmPtr::weakPointer(m_graph, globalObject), GPRInfo::regT3); // JSGlobalObject needs to be in regT3 |
868 | JITCompiler::Call slowCall = m_jit.nearCall(); |
869 | |
870 | done.link(&m_jit); |
871 | |
872 | if (isTail) |
873 | m_jit.abortWithReason(JITDidReturnFromTailCall); |
874 | else |
875 | setResultAndResetStack(); |
876 | |
877 | m_jit.addJSCall(fastCall, slowCall, targetToCheck, callLinkInfo); |
878 | } |
879 | |
880 | // Clang should allow unreachable [[clang::fallthrough]] in template functions if any template expansion uses it |
881 | // http://llvm.org/bugs/show_bug.cgi?id=18619 |
882 | IGNORE_WARNINGS_BEGIN("implicit-fallthrough" ) |
// Loads the value for |edge| into a GPR while speculating that it is an
// int32, emitting a speculation check when the incoming abstract type is
// not already proven to be SpecInt32Only. When |strict| is true the result
// is always an unboxed DataFormatInt32 payload; when false the result may
// also come back boxed as DataFormatJSInt32. |returnFormat| reports which
// format was actually produced. Returns the (locked) GPR holding the value.
template<bool strict>
GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnFormat)
{
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT(edge.useKind() != KnownInt32Use || !(value.m_type & ~SpecInt32Only));

    // If filtering down to int32 empties the abstract value, this speculation
    // can never succeed: terminate speculative execution and hand back an
    // arbitrary register so the caller can proceed syntactically.
    m_interpreter.filter(value, SpecInt32Only);
    if (value.isClear()) {
        if (mayHaveTypeCheck(edge.useKind()))
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        returnFormat = DataFormatInt32;
        return allocate();
    }

    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        // Not currently in a register: materialize from a constant, or reload
        // from the spill slot.
        GPRReg gpr = allocate();

        if (edge->hasConstant()) {
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            ASSERT(edge->isInt32Constant());
            m_jit.move(MacroAssembler::Imm32(edge->asInt32()), gpr);
            info.fillInt32(*m_stream, gpr);
            returnFormat = DataFormatInt32;
            return gpr;
        }

        DataFormat spillFormat = info.spillFormat();

        DFG_ASSERT(m_jit.graph(), m_currentNode, (spillFormat & DataFormatJS) || spillFormat == DataFormatInt32, spillFormat);

        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);

        if (spillFormat == DataFormatJSInt32 || spillFormat == DataFormatInt32) {
            // If we know this was spilled as an integer we can fill without checking.
            if (strict) {
                // Strict mode wants only the unboxed 32-bit payload.
                m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
                info.fillInt32(*m_stream, gpr);
                returnFormat = DataFormatInt32;
                return gpr;
            }
            if (spillFormat == DataFormatInt32) {
                m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
                info.fillInt32(*m_stream, gpr);
                returnFormat = DataFormatInt32;
            } else {
                m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
                info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
                returnFormat = DataFormatJSInt32;
            }
            return gpr;
        }
        m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);

        // Fill as JSValue, and fall through.
        info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
        m_gprs.unlock(gpr);
        FALLTHROUGH;
    }

    case DataFormatJS: {
        DFG_ASSERT(m_jit.graph(), m_currentNode, !(type & SpecInt52Any));
        // Check the value is an integer.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        // Only emit the type check if the abstract state cannot already prove
        // the value is an int32.
        if (type & ~SpecInt32Only)
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchIfNotInt32(gpr));
        info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
        // If !strict we're done, return.
        if (!strict) {
            returnFormat = DataFormatJSInt32;
            return gpr;
        }
        // else fall through & handle as DataFormatJSInt32.
        m_gprs.unlock(gpr);
        FALLTHROUGH;
    }

    case DataFormatJSInt32: {
        // In a strict fill we need to strip off the value tag.
        if (strict) {
            GPRReg gpr = info.gpr();
            GPRReg result;
            // If the register has already been locked we need to take a copy.
            // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInt32, not DataFormatJSInt32.
            if (m_gprs.isLocked(gpr))
                result = allocate();
            else {
                m_gprs.lock(gpr);
                info.fillInt32(*m_stream, gpr);
                result = gpr;
            }
            m_jit.zeroExtend32ToPtr(gpr, result);
            returnFormat = DataFormatInt32;
            return result;
        }

        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        returnFormat = DataFormatJSInt32;
        return gpr;
    }

    case DataFormatInt32: {
        // Already an unboxed int32 in a register; just lock it and hand it out.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        returnFormat = DataFormatInt32;
        return gpr;
    }

    case DataFormatJSDouble:
    case DataFormatCell:
    case DataFormatBoolean:
    case DataFormatJSCell:
    case DataFormatJSBoolean:
    case DataFormatDouble:
    case DataFormatStorage:
    case DataFormatInt52:
    case DataFormatStrictInt52:
        // These formats are impossible for an Int32-speculated edge.
        DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format" );

    default:
        DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format" );
        return InvalidGPRReg;
    }
}
1013 | IGNORE_WARNINGS_END |
1014 | |
1015 | GPRReg SpeculativeJIT::fillSpeculateInt32(Edge edge, DataFormat& returnFormat) |
1016 | { |
1017 | return fillSpeculateInt32Internal<false>(edge, returnFormat); |
1018 | } |
1019 | |
1020 | GPRReg SpeculativeJIT::fillSpeculateInt32Strict(Edge edge) |
1021 | { |
1022 | DataFormat mustBeDataFormatInt32; |
1023 | GPRReg result = fillSpeculateInt32Internal<true>(edge, mustBeDataFormatInt32); |
1024 | DFG_ASSERT(m_jit.graph(), m_currentNode, mustBeDataFormatInt32 == DataFormatInt32, mustBeDataFormatInt32); |
1025 | return result; |
1026 | } |
1027 | |
// Loads the value for |edge| into a GPR in the requested Int52
// representation. DataFormatInt52 holds the integer shifted left by
// JSValue::int52ShiftAmount; DataFormatStrictInt52 holds the unshifted
// integer. Converts between the two representations with shifts as needed.
GPRReg SpeculativeJIT::fillSpeculateInt52(Edge edge, DataFormat desiredFormat)
{
    ASSERT(desiredFormat == DataFormatInt52 || desiredFormat == DataFormatStrictInt52);
    AbstractValue& value = m_state.forNode(edge);

    // If nothing survives the Int52 filter, this speculation always fails.
    m_interpreter.filter(value, SpecInt52Any);
    if (value.isClear()) {
        if (mayHaveTypeCheck(edge.useKind()))
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return allocate();
    }

    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        GPRReg gpr = allocate();

        if (edge->hasConstant()) {
            JSValue jsValue = edge->asJSValue();
            ASSERT(jsValue.isAnyInt());
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            int64_t value = jsValue.asAnyInt();
            // Pre-shift the constant when the caller wants the Int52 form.
            if (desiredFormat == DataFormatInt52)
                value = value << JSValue::int52ShiftAmount;
            m_jit.move(MacroAssembler::Imm64(value), gpr);
            info.fillGPR(*m_stream, gpr, desiredFormat);
            return gpr;
        }

        DataFormat spillFormat = info.spillFormat();

        DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52, spillFormat);

        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);

        // Reload from the spill slot, then shift into the desired form when
        // the spilled representation differs from the requested one.
        m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
        if (desiredFormat == DataFormatStrictInt52) {
            if (spillFormat == DataFormatInt52)
                m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
            info.fillStrictInt52(*m_stream, gpr);
            return gpr;
        }
        if (spillFormat == DataFormatStrictInt52)
            m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
        info.fillInt52(*m_stream, gpr);
        return gpr;
    }

    case DataFormatStrictInt52: {
        GPRReg gpr = info.gpr();
        bool wasLocked = m_gprs.isLocked(gpr);
        lock(gpr);
        if (desiredFormat == DataFormatStrictInt52)
            return gpr;
        // Caller wants the shifted form. If the register was already locked
        // by another use, convert in a fresh copy; otherwise convert in place
        // and retag the generation info.
        if (wasLocked) {
            GPRReg result = allocate();
            m_jit.move(gpr, result);
            unlock(gpr);
            gpr = result;
        } else
            info.fillInt52(*m_stream, gpr);
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
        return gpr;
    }

    case DataFormatInt52: {
        GPRReg gpr = info.gpr();
        bool wasLocked = m_gprs.isLocked(gpr);
        lock(gpr);
        if (desiredFormat == DataFormatInt52)
            return gpr;
        // Caller wants the strict (unshifted) form; same copy-vs-in-place
        // handling as the case above, but shifting right instead.
        if (wasLocked) {
            GPRReg result = allocate();
            m_jit.move(gpr, result);
            unlock(gpr);
            gpr = result;
        } else
            info.fillStrictInt52(*m_stream, gpr);
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
        return gpr;
    }

    default:
        DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format" );
        return InvalidGPRReg;
    }
}
1117 | |
1118 | FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge) |
1119 | { |
1120 | ASSERT(edge.useKind() == DoubleRepUse || edge.useKind() == DoubleRepRealUse || edge.useKind() == DoubleRepAnyIntUse); |
1121 | ASSERT(edge->hasDoubleResult()); |
1122 | VirtualRegister virtualRegister = edge->virtualRegister(); |
1123 | GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); |
1124 | |
1125 | if (info.registerFormat() == DataFormatNone) { |
1126 | if (edge->hasConstant()) { |
1127 | if (edge->isNumberConstant()) { |
1128 | FPRReg fpr = fprAllocate(); |
1129 | int64_t doubleAsInt = reinterpretDoubleToInt64(edge->asNumber()); |
1130 | if (!doubleAsInt) |
1131 | m_jit.moveZeroToDouble(fpr); |
1132 | else { |
1133 | GPRReg gpr = allocate(); |
1134 | m_jit.move(MacroAssembler::Imm64(doubleAsInt), gpr); |
1135 | m_jit.move64ToDouble(gpr, fpr); |
1136 | unlock(gpr); |
1137 | } |
1138 | |
1139 | m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); |
1140 | info.fillDouble(*m_stream, fpr); |
1141 | return fpr; |
1142 | } |
1143 | if (mayHaveTypeCheck(edge.useKind())) |
1144 | terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); |
1145 | return fprAllocate(); |
1146 | } |
1147 | |
1148 | DataFormat spillFormat = info.spillFormat(); |
1149 | if (spillFormat != DataFormatDouble) { |
1150 | DFG_CRASH( |
1151 | m_jit.graph(), m_currentNode, toCString( |
1152 | "Expected " , edge, " to have double format but instead it is spilled as " , |
1153 | dataFormatToString(spillFormat)).data()); |
1154 | } |
1155 | DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat == DataFormatDouble, spillFormat); |
1156 | FPRReg fpr = fprAllocate(); |
1157 | m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); |
1158 | m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); |
1159 | info.fillDouble(*m_stream, fpr); |
1160 | return fpr; |
1161 | } |
1162 | |
1163 | DFG_ASSERT(m_jit.graph(), m_currentNode, info.registerFormat() == DataFormatDouble, info.registerFormat()); |
1164 | FPRReg fpr = info.fpr(); |
1165 | m_fprs.lock(fpr); |
1166 | return fpr; |
1167 | } |
1168 | |
// Loads the value for |edge| into a GPR while speculating that it is a cell,
// emitting a not-cell speculation check when the abstract state cannot prove
// it. Returns the (locked) GPR holding the cell.
GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
{
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT((edge.useKind() != KnownCellUse && edge.useKind() != KnownStringUse) || !(value.m_type & ~SpecCellCheck));

    // If filtering to cell empties the abstract value, this speculation can
    // never succeed: terminate and return an arbitrary register.
    m_interpreter.filter(value, SpecCellCheck);
    if (value.isClear()) {
        if (mayHaveTypeCheck(edge.useKind()))
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return allocate();
    }

    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        // Not in a register: materialize from a constant or reload from the
        // spill slot, checking cell-ness if needed.
        GPRReg gpr = allocate();

        if (edge->hasConstant()) {
            JSValue jsValue = edge->asJSValue();
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
            info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
            return gpr;
        }

        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
        m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);

        info.fillJSValue(*m_stream, gpr, DataFormatJS);
        // Only emit the cell check if the abstract state cannot prove it.
        if (type & ~SpecCellCheck)
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchIfNotCell(JSValueRegs(gpr)));
        info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
        return gpr;
    }

    case DataFormatCell:
    case DataFormatJSCell: {
        // Already known to be a cell; in debug builds, double-check at runtime.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        if (!ASSERT_DISABLED) {
            MacroAssembler::Jump checkCell = m_jit.branchIfCell(JSValueRegs(gpr));
            m_jit.abortWithReason(DFGIsNotCell);
            checkCell.link(&m_jit);
        }
        return gpr;
    }

    case DataFormatJS: {
        // Boxed JSValue in a register: add the cell check if necessary and
        // upgrade the recorded format.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        if (type & ~SpecCellCheck)
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchIfNotCell(JSValueRegs(gpr)));
        info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
        return gpr;
    }

    case DataFormatJSInt32:
    case DataFormatInt32:
    case DataFormatJSDouble:
    case DataFormatJSBoolean:
    case DataFormatBoolean:
    case DataFormatDouble:
    case DataFormatStorage:
    case DataFormatInt52:
    case DataFormatStrictInt52:
        // These formats are impossible for a cell-speculated edge.
        DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format" );

    default:
        DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format" );
        return InvalidGPRReg;
    }
}
1244 | |
// Loads the value for |edge| into a GPR while speculating that it is a
// boolean. The check works by XORing with ValueFalse: a boxed boolean then
// becomes 0 or 1, so any bits outside the low bit indicate a non-boolean
// (caught by the branchTest64 against ~1); the second XOR restores the boxed
// value. The SpeculationRecovery presumably lets OSR exit undo the first XOR
// if the check fails — see BooleanSpeculationCheck handling.
GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge)
{
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT(edge.useKind() != KnownBooleanUse || !(value.m_type & ~SpecBoolean));

    // If filtering to boolean empties the abstract value, the speculation can
    // never succeed: terminate and return an arbitrary register.
    m_interpreter.filter(value, SpecBoolean);
    if (value.isClear()) {
        if (mayHaveTypeCheck(edge.useKind()))
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
        return allocate();
    }

    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        GPRReg gpr = allocate();

        if (edge->hasConstant()) {
            JSValue jsValue = edge->asJSValue();
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
            info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
            return gpr;
        }
        DFG_ASSERT(m_jit.graph(), m_currentNode, info.spillFormat() & DataFormatJS, info.spillFormat());
        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
        m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);

        info.fillJSValue(*m_stream, gpr, DataFormatJS);
        // Only emit the boolean check if the abstract state cannot prove it.
        if (type & ~SpecBoolean) {
            m_jit.xor64(TrustedImm32(JSValue::ValueFalse), gpr);
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
            m_jit.xor64(TrustedImm32(JSValue::ValueFalse), gpr);
        }
        info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
        return gpr;
    }

    case DataFormatBoolean:
    case DataFormatJSBoolean: {
        // Already known to be a boolean; just lock and hand it out.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    case DataFormatJS: {
        // Boxed JSValue in a register: same XOR-based boolean check as above.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        if (type & ~SpecBoolean) {
            m_jit.xor64(TrustedImm32(JSValue::ValueFalse), gpr);
            speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
            m_jit.xor64(TrustedImm32(JSValue::ValueFalse), gpr);
        }
        info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
        return gpr;
    }

    case DataFormatJSInt32:
    case DataFormatInt32:
    case DataFormatJSDouble:
    case DataFormatJSCell:
    case DataFormatCell:
    case DataFormatDouble:
    case DataFormatStorage:
    case DataFormatInt52:
    case DataFormatStrictInt52:
        // These formats are impossible for a boolean-speculated edge.
        DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format" );

    default:
        DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format" );
        return InvalidGPRReg;
    }
}
1321 | |
1322 | void SpeculativeJIT::compileObjectStrictEquality(Edge objectChild, Edge otherChild) |
1323 | { |
1324 | SpeculateCellOperand op1(this, objectChild); |
1325 | JSValueOperand op2(this, otherChild); |
1326 | GPRTemporary result(this); |
1327 | |
1328 | GPRReg op1GPR = op1.gpr(); |
1329 | GPRReg op2GPR = op2.gpr(); |
1330 | GPRReg resultGPR = result.gpr(); |
1331 | |
1332 | DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); |
1333 | |
1334 | // At this point we know that we can perform a straight-forward equality comparison on pointer |
1335 | // values because we are doing strict equality. |
1336 | m_jit.compare64(MacroAssembler::Equal, op1GPR, op2GPR, resultGPR); |
1337 | m_jit.or32(TrustedImm32(JSValue::ValueFalse), resultGPR); |
1338 | jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean); |
1339 | } |
1340 | |
1341 | void SpeculativeJIT::compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode) |
1342 | { |
1343 | BasicBlock* taken = branchNode->branchData()->taken.block; |
1344 | BasicBlock* notTaken = branchNode->branchData()->notTaken.block; |
1345 | |
1346 | SpeculateCellOperand op1(this, objectChild); |
1347 | JSValueOperand op2(this, otherChild); |
1348 | |
1349 | GPRReg op1GPR = op1.gpr(); |
1350 | GPRReg op2GPR = op2.gpr(); |
1351 | |
1352 | DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); |
1353 | |
1354 | if (taken == nextBlock()) { |
1355 | branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR, notTaken); |
1356 | jump(taken); |
1357 | } else { |
1358 | branchPtr(MacroAssembler::Equal, op1GPR, op2GPR, taken); |
1359 | jump(notTaken); |
1360 | } |
1361 | } |
1362 | |
1363 | void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild) |
1364 | { |
1365 | SpeculateCellOperand op1(this, leftChild); |
1366 | JSValueOperand op2(this, rightChild, ManualOperandSpeculation); |
1367 | GPRTemporary result(this); |
1368 | |
1369 | GPRReg op1GPR = op1.gpr(); |
1370 | GPRReg op2GPR = op2.gpr(); |
1371 | GPRReg resultGPR = result.gpr(); |
1372 | |
1373 | bool masqueradesAsUndefinedWatchpointValid = |
1374 | masqueradesAsUndefinedWatchpointIsStillValid(); |
1375 | |
1376 | if (masqueradesAsUndefinedWatchpointValid) { |
1377 | DFG_TYPE_CHECK( |
1378 | JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); |
1379 | } else { |
1380 | DFG_TYPE_CHECK( |
1381 | JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); |
1382 | speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild, |
1383 | m_jit.branchTest8( |
1384 | MacroAssembler::NonZero, |
1385 | MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), |
1386 | MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); |
1387 | } |
1388 | |
1389 | // It seems that most of the time when programs do a == b where b may be either null/undefined |
1390 | // or an object, b is usually an object. Balance the branches to make that case fast. |
1391 | MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(JSValueRegs(op2GPR)); |
1392 | |
1393 | // We know that within this branch, rightChild must be a cell. |
1394 | if (masqueradesAsUndefinedWatchpointValid) { |
1395 | DFG_TYPE_CHECK( |
1396 | JSValueRegs(op2GPR), rightChild, (~SpecCellCheck) | SpecObject, m_jit.branchIfNotObject(op2GPR)); |
1397 | } else { |
1398 | DFG_TYPE_CHECK( |
1399 | JSValueRegs(op2GPR), rightChild, (~SpecCellCheck) | SpecObject, m_jit.branchIfNotObject(op2GPR)); |
1400 | speculationCheck(BadType, JSValueRegs(op2GPR), rightChild, |
1401 | m_jit.branchTest8( |
1402 | MacroAssembler::NonZero, |
1403 | MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), |
1404 | MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); |
1405 | } |
1406 | |
1407 | // At this point we know that we can perform a straight-forward equality comparison on pointer |
1408 | // values because both left and right are pointers to objects that have no special equality |
1409 | // protocols. |
1410 | m_jit.compare64(MacroAssembler::Equal, op1GPR, op2GPR, resultGPR); |
1411 | MacroAssembler::Jump done = m_jit.jump(); |
1412 | |
1413 | rightNotCell.link(&m_jit); |
1414 | |
1415 | // We know that within this branch, rightChild must not be a cell. Check if that is enough to |
1416 | // prove that it is either null or undefined. |
1417 | if (needsTypeCheck(rightChild, SpecCellCheck | SpecOther)) { |
1418 | m_jit.move(op2GPR, resultGPR); |
1419 | m_jit.and64(MacroAssembler::TrustedImm32(~JSValue::UndefinedTag), resultGPR); |
1420 | |
1421 | typeCheck( |
1422 | JSValueRegs(op2GPR), rightChild, SpecCellCheck | SpecOther, |
1423 | m_jit.branch64( |
1424 | MacroAssembler::NotEqual, resultGPR, |
1425 | MacroAssembler::TrustedImm64(JSValue::ValueNull))); |
1426 | } |
1427 | m_jit.move(TrustedImm32(0), result.gpr()); |
1428 | |
1429 | done.link(&m_jit); |
1430 | m_jit.or32(TrustedImm32(JSValue::ValueFalse), resultGPR); |
1431 | jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean); |
1432 | } |
1433 | |
// Fused branch form of object-to-(object-or-other) equality: same checks as
// compileObjectToObjectOrOtherEquality, but branches directly to the
// taken/notTaken blocks instead of producing a boolean value.
void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    SpeculateCellOperand op1(this, leftChild);
    JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
    GPRTemporary result(this);

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();
    GPRReg resultGPR = result.gpr();

    bool masqueradesAsUndefinedWatchpointValid =
        masqueradesAsUndefinedWatchpointIsStillValid();

    if (masqueradesAsUndefinedWatchpointValid) {
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
    } else {
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
        // Without the watchpoint, also speculate that the left object does
        // not masquerade as undefined (such objects have special == rules).
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    // It seems that most of the time when programs do a == b where b may be either null/undefined
    // or an object, b is usually an object. Balance the branches to make that case fast.
    MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(JSValueRegs(op2GPR));

    // We know that within this branch, rightChild must be a cell.
    if (masqueradesAsUndefinedWatchpointValid) {
        DFG_TYPE_CHECK(
            JSValueRegs(op2GPR), rightChild, (~SpecCellCheck) | SpecObject, m_jit.branchIfNotObject(op2GPR));
    } else {
        DFG_TYPE_CHECK(
            JSValueRegs(op2GPR), rightChild, (~SpecCellCheck) | SpecObject, m_jit.branchIfNotObject(op2GPR));
        speculationCheck(BadType, JSValueRegs(op2GPR), rightChild,
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    // At this point we know that we can perform a straight-forward equality comparison on pointer
    // values because both left and right are pointers to objects that have no special equality
    // protocols.
    branch64(MacroAssembler::Equal, op1GPR, op2GPR, taken);

    // We know that within this branch, rightChild must not be a cell. Check if that is enough to
    // prove that it is either null or undefined.
    if (!needsTypeCheck(rightChild, SpecCellCheck | SpecOther))
        // No check needed: a non-cell right side is known null/undefined, and
        // comparing against an object is therefore false — fall to notTaken.
        rightNotCell.link(&m_jit);
    else {
        jump(notTaken, ForceJump);

        // Masking off the undefined tag folds null and undefined to the same
        // bit pattern (ValueNull); anything else fails the speculation.
        rightNotCell.link(&m_jit);
        m_jit.move(op2GPR, resultGPR);
        m_jit.and64(MacroAssembler::TrustedImm32(~JSValue::UndefinedTag), resultGPR);

        typeCheck(
            JSValueRegs(op2GPR), rightChild, SpecCellCheck | SpecOther, m_jit.branch64(
                MacroAssembler::NotEqual, resultGPR,
                MacroAssembler::TrustedImm64(JSValue::ValueNull)));
    }

    jump(notTaken);
}
1505 | |
1506 | void SpeculativeJIT::compileSymbolUntypedEquality(Node* node, Edge symbolEdge, Edge untypedEdge) |
1507 | { |
1508 | SpeculateCellOperand symbol(this, symbolEdge); |
1509 | JSValueOperand untyped(this, untypedEdge); |
1510 | GPRTemporary result(this, Reuse, symbol, untyped); |
1511 | |
1512 | GPRReg symbolGPR = symbol.gpr(); |
1513 | GPRReg untypedGPR = untyped.gpr(); |
1514 | GPRReg resultGPR = result.gpr(); |
1515 | |
1516 | speculateSymbol(symbolEdge, symbolGPR); |
1517 | |
1518 | // At this point we know that we can perform a straight-forward equality comparison on pointer |
1519 | // values because we are doing strict equality. |
1520 | m_jit.compare64(MacroAssembler::Equal, symbolGPR, untypedGPR, resultGPR); |
1521 | unblessedBooleanResult(resultGPR, node); |
1522 | } |
1523 | |
// Emits a relational comparison of two Int52 operands and produces a tagged
// JS boolean. The second operand is materialized in whichever Int52 format
// the first one got, so both sides compare in the same representation.
void SpeculativeJIT::compileInt52Compare(Node* node, MacroAssembler::RelationalCondition condition)
{
    SpeculateWhicheverInt52Operand op1(this, node->child1());
    SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
    GPRTemporary result(this, Reuse, op1, op2);

    m_jit.compare64(condition, op1.gpr(), op2.gpr(), result.gpr());

    // compare64 leaves 0/1 in the result register; or-ing in ValueFalse turns
    // that into the tagged boolean encoding.
    // If we add a DataFormatBool, we should use it here.
    m_jit.or32(TrustedImm32(JSValue::ValueFalse), result.gpr());
    jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
}
1536 | |
1537 | void SpeculativeJIT::compilePeepHoleInt52Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition) |
1538 | { |
1539 | BasicBlock* taken = branchNode->branchData()->taken.block; |
1540 | BasicBlock* notTaken = branchNode->branchData()->notTaken.block; |
1541 | |
1542 | // The branch instruction will branch to the taken block. |
1543 | // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through. |
1544 | if (taken == nextBlock()) { |
1545 | condition = JITCompiler::invert(condition); |
1546 | BasicBlock* tmp = taken; |
1547 | taken = notTaken; |
1548 | notTaken = tmp; |
1549 | } |
1550 | |
1551 | SpeculateWhicheverInt52Operand op1(this, node->child1()); |
1552 | SpeculateWhicheverInt52Operand op2(this, node->child2(), op1); |
1553 | |
1554 | branch64(condition, op1.gpr(), op2.gpr(), taken); |
1555 | jump(notTaken); |
1556 | } |
1557 | |
1558 | void SpeculativeJIT::compileCompareEqPtr(Node* node) |
1559 | { |
1560 | JSValueOperand value(this, node->child1()); |
1561 | GPRTemporary result(this); |
1562 | GPRReg valueGPR = value.gpr(); |
1563 | GPRReg resultGPR = result.gpr(); |
1564 | |
1565 | m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), node->cellOperand()->cell()), resultGPR); |
1566 | m_jit.compare64(MacroAssembler::Equal, valueGPR, resultGPR, resultGPR); |
1567 | unblessedBooleanResult(resultGPR, node); |
1568 | } |
1569 | |
// Emits LogicalNot for a value speculated to be ObjectOrOther: either an
// object or null/undefined. Objects are truthy (result false); null/undefined
// are falsy (result true). Anything else fails speculation. Produces a tagged
// JS boolean in resultGPR.
void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
{
    JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
    GPRTemporary result(this);
    GPRReg valueGPR = value.gpr();
    GPRReg resultGPR = result.gpr();
    GPRTemporary structure;
    GPRReg structureGPR = InvalidGPRReg;
    GPRTemporary scratch;
    GPRReg scratchGPR = InvalidGPRReg;

    bool masqueradesAsUndefinedWatchpointValid =
        masqueradesAsUndefinedWatchpointIsStillValid();

    if (!masqueradesAsUndefinedWatchpointValid) {
        // The masquerades as undefined case will use the structure register, so allocate it here.
        // Do this at the top of the function to avoid branching around a register allocation.
        GPRTemporary realStructure(this);
        GPRTemporary realScratch(this);
        structure.adopt(realStructure);
        scratch.adopt(realScratch);
        structureGPR = structure.gpr();
        scratchGPR = scratch.gpr();
    }

    MacroAssembler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
    if (masqueradesAsUndefinedWatchpointValid) {
        // Watchpoint intact: no object can masquerade as undefined, so a cell
        // merely needs to be an object to be treated as truthy.
        DFG_TYPE_CHECK(
            JSValueRegs(valueGPR), nodeUse, (~SpecCellCheck) | SpecObject, m_jit.branchIfNotObject(valueGPR));
    } else {
        DFG_TYPE_CHECK(
            JSValueRegs(valueGPR), nodeUse, (~SpecCellCheck) | SpecObject, m_jit.branchIfNotObject(valueGPR));

        // Speculate that we never see an object with the MasqueradesAsUndefined
        // flag belonging to the current global object; OSR-exit if we do.
        MacroAssembler::Jump isNotMasqueradesAsUndefined =
            m_jit.branchTest8(
                MacroAssembler::Zero,
                MacroAssembler::Address(valueGPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined));

        m_jit.emitLoadStructure(vm(), valueGPR, structureGPR, scratchGPR);
        speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
            m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
                TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));

        isNotMasqueradesAsUndefined.link(&m_jit);
    }
    // Object case: !object == false.
    m_jit.move(TrustedImm32(JSValue::ValueFalse), resultGPR);
    MacroAssembler::Jump done = m_jit.jump();

    notCell.link(&m_jit);

    if (needsTypeCheck(nodeUse, SpecCellCheck | SpecOther)) {
        // Masking off the undefined tag bit maps both null and undefined to
        // the same encoding (ValueNull); any other non-cell fails speculation.
        m_jit.move(valueGPR, resultGPR);
        m_jit.and64(MacroAssembler::TrustedImm32(~JSValue::UndefinedTag), resultGPR);
        typeCheck(
            JSValueRegs(valueGPR), nodeUse, SpecCellCheck | SpecOther, m_jit.branch64(
                MacroAssembler::NotEqual,
                resultGPR,
                MacroAssembler::TrustedImm64(JSValue::ValueNull)));
    }
    // Null/undefined case: !value == true.
    m_jit.move(TrustedImm32(JSValue::ValueTrue), resultGPR);

    done.link(&m_jit);

    jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
}
1638 | |
// Emits code for LogicalNot, dispatching on the operand's speculated use
// kind. Every case produces a tagged JS boolean result (or delegates to a
// helper that does).
void SpeculativeJIT::compileLogicalNot(Node* node)
{
    switch (node->child1().useKind()) {
    case ObjectOrOtherUse: {
        compileObjectOrOtherLogicalNot(node->child1());
        return;
    }

    case Int32Use: {
        // !x for an int32 is x == 0; the comparison yields 0/1, which is then
        // tagged into a JS boolean by or-ing in ValueFalse.
        SpeculateInt32Operand value(this, node->child1());
        GPRTemporary result(this, Reuse, value);
        m_jit.compare32(MacroAssembler::Equal, value.gpr(), MacroAssembler::TrustedImm32(0), result.gpr());
        m_jit.or32(TrustedImm32(JSValue::ValueFalse), result.gpr());
        jsValueResult(result.gpr(), node, DataFormatJSBoolean);
        return;
    }

    case DoubleRepUse: {
        // Start with false; if the branch does not take (the double is not
        // non-zero), flip the low bit to turn the encoding into true.
        SpeculateDoubleOperand value(this, node->child1());
        FPRTemporary scratch(this);
        GPRTemporary result(this);
        m_jit.move(TrustedImm32(JSValue::ValueFalse), result.gpr());
        MacroAssembler::Jump nonZero = m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr());
        m_jit.xor32(TrustedImm32(true), result.gpr());
        nonZero.link(&m_jit);
        jsValueResult(result.gpr(), node, DataFormatJSBoolean);
        return;
    }

    case BooleanUse:
    case KnownBooleanUse: {
        if (!needsTypeCheck(node->child1(), SpecBoolean)) {
            // Proven boolean: ValueFalse and ValueTrue differ only in the low
            // bit, so xor-ing with 1 negates the boolean in place.
            SpeculateBooleanOperand value(this, node->child1());
            GPRTemporary result(this, Reuse, value);

            m_jit.move(value.gpr(), result.gpr());
            m_jit.xor64(TrustedImm32(true), result.gpr());

            jsValueResult(result.gpr(), node, DataFormatJSBoolean);
            return;
        }

        JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
        GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).

        // value ^ ValueFalse leaves 0 or 1 iff value was a boolean; if any
        // other bit is set the operand was not a boolean, so OSR-exit.
        m_jit.move(value.gpr(), result.gpr());
        m_jit.xor64(TrustedImm32(JSValue::ValueFalse), result.gpr());
        typeCheck(
            JSValueRegs(value.gpr()), node->child1(), SpecBoolean, m_jit.branchTest64(
                JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
        // Xor-ing the surviving 0/1 with ValueTrue yields the negated tagged
        // boolean (0 -> true, 1 -> false).
        m_jit.xor64(TrustedImm32(JSValue::ValueTrue), result.gpr());

        // If we add a DataFormatBool, we should use it here.
        jsValueResult(result.gpr(), node, DataFormatJSBoolean);
        return;
    }

    case UntypedUse: {
        JSValueOperand arg1(this, node->child1());
        GPRTemporary result(this);

        GPRReg arg1GPR = arg1.gpr();
        GPRReg resultGPR = result.gpr();

        FPRTemporary valueFPR(this);
        FPRTemporary tempFPR(this);

        // A scratch register is only needed when the masquerades-as-undefined
        // watchpoint has been invalidated and the conversion must check for it.
        bool shouldCheckMasqueradesAsUndefined = !masqueradesAsUndefinedWatchpointIsStillValid();
        JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
        Optional<GPRTemporary> scratch;
        GPRReg scratchGPR = InvalidGPRReg;
        if (shouldCheckMasqueradesAsUndefined) {
            scratch.emplace(this);
            scratchGPR = scratch->gpr();
        }
        // Generic path: convert to boolean with negation folded in, then tag.
        bool negateResult = true;
        m_jit.emitConvertValueToBoolean(vm(), JSValueRegs(arg1GPR), resultGPR, scratchGPR, valueFPR.fpr(), tempFPR.fpr(), shouldCheckMasqueradesAsUndefined, globalObject, negateResult);
        m_jit.or32(TrustedImm32(JSValue::ValueFalse), resultGPR);
        jsValueResult(resultGPR, node, DataFormatJSBoolean);
        return;
    }
    case StringUse:
        return compileStringZeroLength(node);

    case StringOrOtherUse:
        return compileLogicalNotStringOrOther(node);

    default:
        DFG_CRASH(m_jit.graph(), node, "Bad use kind" );
        break;
    }
}
1731 | |
// Emits a conditional branch on a value speculated to be ObjectOrOther.
// Objects are truthy and jump to taken; null/undefined are falsy and jump to
// notTaken. Any other value fails speculation and OSR-exits.
void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken)
{
    JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
    GPRTemporary scratch(this);
    GPRTemporary structure;
    GPRReg valueGPR = value.gpr();
    GPRReg scratchGPR = scratch.gpr();
    GPRReg structureGPR = InvalidGPRReg;

    if (!masqueradesAsUndefinedWatchpointIsStillValid()) {
        // Only the masquerades-as-undefined path needs the structure register;
        // allocate it up front so no allocation happens mid-emission.
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    MacroAssembler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        // Watchpoint intact: a cell only needs to be an object to be truthy.
        DFG_TYPE_CHECK(
            JSValueRegs(valueGPR), nodeUse, (~SpecCellCheck) | SpecObject, m_jit.branchIfNotObject(valueGPR));
    } else {
        DFG_TYPE_CHECK(
            JSValueRegs(valueGPR), nodeUse, (~SpecCellCheck) | SpecObject, m_jit.branchIfNotObject(valueGPR));

        // Speculate that we never observe an object masquerading as undefined
        // from the current global object; OSR-exit if we do.
        JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(
            JITCompiler::Zero,
            MacroAssembler::Address(valueGPR, JSCell::typeInfoFlagsOffset()),
            TrustedImm32(MasqueradesAsUndefined));

        m_jit.emitLoadStructure(vm(), valueGPR, structureGPR, scratchGPR);
        speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
            m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
                TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));

        isNotMasqueradesAsUndefined.link(&m_jit);
    }
    // Object: truthy, take the branch.
    jump(taken, ForceJump);

    notCell.link(&m_jit);

    if (needsTypeCheck(nodeUse, SpecCellCheck | SpecOther)) {
        // Masking off the undefined tag bit maps both null and undefined to
        // the same encoding (ValueNull); anything else fails speculation.
        m_jit.move(valueGPR, scratchGPR);
        m_jit.and64(MacroAssembler::TrustedImm32(~JSValue::UndefinedTag), scratchGPR);
        typeCheck(
            JSValueRegs(valueGPR), nodeUse, SpecCellCheck | SpecOther, m_jit.branch64(
                MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImm64(JSValue::ValueNull)));
    }
    // Null/undefined: falsy.
    jump(notTaken);

    noResult(m_currentNode);
}
1784 | |
// Emits code for a Branch node, dispatching on the operand's speculated use
// kind and jumping to the taken/notTaken blocks from the node's branch data.
void SpeculativeJIT::emitBranch(Node* node)
{
    BasicBlock* taken = node->branchData()->taken.block;
    BasicBlock* notTaken = node->branchData()->notTaken.block;

    switch (node->child1().useKind()) {
    case ObjectOrOtherUse: {
        emitObjectOrOtherBranch(node->child1(), taken, notTaken);
        return;
    }

    case Int32Use:
    case DoubleRepUse: {
        if (node->child1().useKind() == Int32Use) {
            bool invert = false;

            // If taken is the next block, invert the test and swap targets so
            // the taken edge becomes a fall-through.
            if (taken == nextBlock()) {
                invert = true;
                BasicBlock* tmp = taken;
                taken = notTaken;
                notTaken = tmp;
            }

            SpeculateInt32Operand value(this, node->child1());
            branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr(), taken);
        } else {
            SpeculateDoubleOperand value(this, node->child1());
            FPRTemporary scratch(this);
            branchDoubleNonZero(value.fpr(), scratch.fpr(), taken);
        }

        jump(notTaken);

        noResult(node);
        return;
    }

    case StringUse: {
        emitStringBranch(node->child1(), taken, notTaken);
        return;
    }

    case StringOrOtherUse: {
        emitStringOrOtherBranch(node->child1(), taken, notTaken);
        return;
    }

    case UntypedUse:
    case BooleanUse:
    case KnownBooleanUse: {
        JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
        GPRReg valueGPR = value.gpr();

        if (node->child1().useKind() == BooleanUse || node->child1().useKind() == KnownBooleanUse) {
            if (!needsTypeCheck(node->child1(), SpecBoolean)) {
                // Proven boolean: testing the low bit distinguishes true from
                // false.
                MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;

                if (taken == nextBlock()) {
                    condition = MacroAssembler::Zero;
                    BasicBlock* tmp = taken;
                    taken = notTaken;
                    notTaken = tmp;
                }

                branchTest32(condition, valueGPR, TrustedImm32(true), taken);
                jump(notTaken);
            } else {
                // Compare against both boolean encodings; anything else is not
                // a boolean and fails the speculation.
                branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken);
                branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken);

                typeCheck(JSValueRegs(valueGPR), node->child1(), SpecBoolean, m_jit.jump());
            }
            value.use();
        } else {
            GPRTemporary result(this);
            FPRTemporary fprValue(this);
            FPRTemporary fprTemp(this);
            Optional<GPRTemporary> scratch;

            GPRReg scratchGPR = InvalidGPRReg;
            // Scratch is only needed when the masquerades-as-undefined
            // watchpoint has been invalidated.
            bool shouldCheckMasqueradesAsUndefined = !masqueradesAsUndefinedWatchpointIsStillValid();
            if (shouldCheckMasqueradesAsUndefined) {
                scratch.emplace(this);
                scratchGPR = scratch->gpr();
            }

            GPRReg resultGPR = result.gpr();
            FPRReg valueFPR = fprValue.fpr();
            FPRReg tempFPR = fprTemp.fpr();

            // Fast paths keyed off the value profile: dispatch likely int32
            // and boolean inputs without the generic truthiness conversion.
            if (node->child1()->prediction() & SpecInt32Only) {
                branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(0))), notTaken);
                branch64(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::numberTagRegister, taken);
            }

            if (node->child1()->prediction() & SpecBoolean) {
                branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken);
                branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken);
            }

            value.use();

            // Generic path: full truthiness check for any remaining value.
            JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
            auto truthy = m_jit.branchIfTruthy(vm(), JSValueRegs(valueGPR), resultGPR, scratchGPR, valueFPR, tempFPR, shouldCheckMasqueradesAsUndefined, globalObject);
            addBranch(truthy, taken);
            jump(notTaken);
        }

        noResult(node, UseChildrenCalledExplicitly);
        return;
    }

    default:
        DFG_CRASH(m_jit.graph(), m_currentNode, "Bad use kind" );
    }
}
1901 | |
1902 | void SpeculativeJIT::compile(Node* node) |
1903 | { |
1904 | NodeType op = node->op(); |
1905 | |
1906 | if (validateDFGDoesGC) { |
1907 | bool expectDoesGC = doesGC(m_jit.graph(), node); |
1908 | m_jit.store8(TrustedImm32(expectDoesGC), vm().heap.addressOfExpectDoesGC()); |
1909 | } |
1910 | |
1911 | #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION) |
1912 | m_jit.clearRegisterAllocationOffsets(); |
1913 | #endif |
1914 | |
1915 | switch (op) { |
1916 | case JSConstant: |
1917 | case DoubleConstant: |
1918 | case Int52Constant: |
1919 | case PhantomDirectArguments: |
1920 | case PhantomClonedArguments: |
1921 | initConstantInfo(node); |
1922 | break; |
1923 | |
1924 | case LazyJSConstant: |
1925 | compileLazyJSConstant(node); |
1926 | break; |
1927 | |
1928 | case Identity: { |
1929 | compileIdentity(node); |
1930 | break; |
1931 | } |
1932 | |
1933 | case Inc: |
1934 | case Dec: |
1935 | compileIncOrDec(node); |
1936 | break; |
1937 | |
1938 | case GetLocal: { |
1939 | AbstractValue& value = m_state.operand(node->local()); |
1940 | |
1941 | // If the CFA is tracking this variable and it found that the variable |
1942 | // cannot have been assigned, then don't attempt to proceed. |
1943 | if (value.isClear()) { |
1944 | m_compileOkay = false; |
1945 | break; |
1946 | } |
1947 | |
1948 | switch (node->variableAccessData()->flushFormat()) { |
1949 | case FlushedDouble: { |
1950 | FPRTemporary result(this); |
1951 | m_jit.loadDouble(JITCompiler::addressFor(node->machineLocal()), result.fpr()); |
1952 | VirtualRegister virtualRegister = node->virtualRegister(); |
1953 | m_fprs.retain(result.fpr(), virtualRegister, SpillOrderDouble); |
1954 | generationInfoFromVirtualRegister(virtualRegister).initDouble(node, node->refCount(), result.fpr()); |
1955 | break; |
1956 | } |
1957 | |
1958 | case FlushedInt32: { |
1959 | GPRTemporary result(this); |
1960 | m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr()); |
1961 | |
1962 | // Like int32Result, but don't useChildren - our children are phi nodes, |
1963 | // and don't represent values within this dataflow with virtual registers. |
1964 | VirtualRegister virtualRegister = node->virtualRegister(); |
1965 | m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger); |
1966 | generationInfoFromVirtualRegister(virtualRegister).initInt32(node, node->refCount(), result.gpr()); |
1967 | break; |
1968 | } |
1969 | |
1970 | case FlushedInt52: { |
1971 | GPRTemporary result(this); |
1972 | m_jit.load64(JITCompiler::addressFor(node->machineLocal()), result.gpr()); |
1973 | |
1974 | VirtualRegister virtualRegister = node->virtualRegister(); |
1975 | m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS); |
1976 | generationInfoFromVirtualRegister(virtualRegister).initInt52(node, node->refCount(), result.gpr()); |
1977 | break; |
1978 | } |
1979 | |
1980 | default: |
1981 | GPRTemporary result(this); |
1982 | m_jit.load64(JITCompiler::addressFor(node->machineLocal()), result.gpr()); |
1983 | |
1984 | // Like jsValueResult, but don't useChildren - our children are phi nodes, |
1985 | // and don't represent values within this dataflow with virtual registers. |
1986 | VirtualRegister virtualRegister = node->virtualRegister(); |
1987 | m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS); |
1988 | |
1989 | DataFormat format; |
1990 | if (isCellSpeculation(value.m_type)) |
1991 | format = DataFormatJSCell; |
1992 | else if (isBooleanSpeculation(value.m_type)) |
1993 | format = DataFormatJSBoolean; |
1994 | else |
1995 | format = DataFormatJS; |
1996 | |
1997 | generationInfoFromVirtualRegister(virtualRegister).initJSValue(node, node->refCount(), result.gpr(), format); |
1998 | break; |
1999 | } |
2000 | break; |
2001 | } |
2002 | |
2003 | case MovHint: { |
2004 | compileMovHint(m_currentNode); |
2005 | noResult(node); |
2006 | break; |
2007 | } |
2008 | |
2009 | case ZombieHint: { |
2010 | recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead); |
2011 | noResult(node); |
2012 | break; |
2013 | } |
2014 | |
2015 | case ExitOK: { |
2016 | noResult(node); |
2017 | break; |
2018 | } |
2019 | |
2020 | case SetLocal: { |
2021 | switch (node->variableAccessData()->flushFormat()) { |
2022 | case FlushedDouble: { |
2023 | SpeculateDoubleOperand value(this, node->child1()); |
2024 | m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node->machineLocal())); |
2025 | noResult(node); |
2026 | // Indicate that it's no longer necessary to retrieve the value of |
2027 | // this bytecode variable from registers or other locations in the stack, |
2028 | // but that it is stored as a double. |
2029 | recordSetLocal(DataFormatDouble); |
2030 | break; |
2031 | } |
2032 | |
2033 | case FlushedInt32: { |
2034 | SpeculateInt32Operand value(this, node->child1()); |
2035 | m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->machineLocal())); |
2036 | noResult(node); |
2037 | recordSetLocal(DataFormatInt32); |
2038 | break; |
2039 | } |
2040 | |
2041 | case FlushedInt52: { |
2042 | SpeculateInt52Operand value(this, node->child1()); |
2043 | m_jit.store64(value.gpr(), JITCompiler::addressFor(node->machineLocal())); |
2044 | noResult(node); |
2045 | recordSetLocal(DataFormatInt52); |
2046 | break; |
2047 | } |
2048 | |
2049 | case FlushedCell: { |
2050 | SpeculateCellOperand cell(this, node->child1()); |
2051 | GPRReg cellGPR = cell.gpr(); |
2052 | m_jit.store64(cellGPR, JITCompiler::addressFor(node->machineLocal())); |
2053 | noResult(node); |
2054 | recordSetLocal(DataFormatCell); |
2055 | break; |
2056 | } |
2057 | |
2058 | case FlushedBoolean: { |
2059 | SpeculateBooleanOperand boolean(this, node->child1()); |
2060 | m_jit.store64(boolean.gpr(), JITCompiler::addressFor(node->machineLocal())); |
2061 | noResult(node); |
2062 | recordSetLocal(DataFormatBoolean); |
2063 | break; |
2064 | } |
2065 | |
2066 | case FlushedJSValue: { |
2067 | JSValueOperand value(this, node->child1()); |
2068 | m_jit.store64(value.gpr(), JITCompiler::addressFor(node->machineLocal())); |
2069 | noResult(node); |
2070 | recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat())); |
2071 | break; |
2072 | } |
2073 | |
2074 | default: |
2075 | DFG_CRASH(m_jit.graph(), node, "Bad flush format" ); |
2076 | break; |
2077 | } |
2078 | |
2079 | break; |
2080 | } |
2081 | |
2082 | case SetArgumentDefinitely: |
2083 | case SetArgumentMaybe: |
2084 | // This is a no-op; it just marks the fact that the argument is being used. |
2085 | // But it may be profitable to use this as a hook to run speculation checks |
2086 | // on arguments, thereby allowing us to trivially eliminate such checks if |
2087 | // the argument is not used. |
2088 | recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat())); |
2089 | break; |
2090 | |
2091 | case ValueBitNot: |
2092 | compileValueBitNot(node); |
2093 | break; |
2094 | |
2095 | case ArithBitNot: |
2096 | compileBitwiseNot(node); |
2097 | break; |
2098 | |
2099 | case ValueBitAnd: |
2100 | case ValueBitXor: |
2101 | case ValueBitOr: |
2102 | compileValueBitwiseOp(node); |
2103 | break; |
2104 | |
2105 | case ArithBitAnd: |
2106 | case ArithBitOr: |
2107 | case ArithBitXor: |
2108 | compileBitwiseOp(node); |
2109 | break; |
2110 | |
2111 | case ValueBitLShift: |
2112 | compileValueLShiftOp(node); |
2113 | break; |
2114 | |
2115 | case ValueBitRShift: |
2116 | compileValueBitRShift(node); |
2117 | break; |
2118 | |
2119 | case ArithBitRShift: |
2120 | case ArithBitLShift: |
2121 | case BitURShift: |
2122 | compileShiftOp(node); |
2123 | break; |
2124 | |
2125 | case UInt32ToNumber: { |
2126 | compileUInt32ToNumber(node); |
2127 | break; |
2128 | } |
2129 | |
2130 | case DoubleAsInt32: { |
2131 | compileDoubleAsInt32(node); |
2132 | break; |
2133 | } |
2134 | |
2135 | case ValueToInt32: { |
2136 | compileValueToInt32(node); |
2137 | break; |
2138 | } |
2139 | |
2140 | case DoubleRep: { |
2141 | compileDoubleRep(node); |
2142 | break; |
2143 | } |
2144 | |
2145 | case ValueRep: { |
2146 | compileValueRep(node); |
2147 | break; |
2148 | } |
2149 | |
2150 | case Int52Rep: { |
2151 | switch (node->child1().useKind()) { |
2152 | case Int32Use: { |
2153 | SpeculateInt32Operand operand(this, node->child1()); |
2154 | GPRTemporary result(this, Reuse, operand); |
2155 | |
2156 | m_jit.signExtend32ToPtr(operand.gpr(), result.gpr()); |
2157 | |
2158 | strictInt52Result(result.gpr(), node); |
2159 | break; |
2160 | } |
2161 | |
2162 | case AnyIntUse: { |
2163 | GPRTemporary result(this); |
2164 | GPRReg resultGPR = result.gpr(); |
2165 | |
2166 | convertAnyInt(node->child1(), resultGPR); |
2167 | |
2168 | strictInt52Result(resultGPR, node); |
2169 | break; |
2170 | } |
2171 | |
2172 | case DoubleRepAnyIntUse: { |
2173 | SpeculateDoubleOperand value(this, node->child1()); |
2174 | FPRReg valueFPR = value.fpr(); |
2175 | |
2176 | flushRegisters(); |
2177 | GPRFlushedCallResult result(this); |
2178 | GPRReg resultGPR = result.gpr(); |
2179 | callOperation(operationConvertDoubleToInt52, resultGPR, valueFPR); |
2180 | |
2181 | DFG_TYPE_CHECK_WITH_EXIT_KIND(Int52Overflow, |
2182 | JSValueRegs(), node->child1(), SpecAnyIntAsDouble, |
2183 | m_jit.branch64( |
2184 | JITCompiler::Equal, resultGPR, |
2185 | JITCompiler::TrustedImm64(JSValue::notInt52))); |
2186 | |
2187 | strictInt52Result(resultGPR, node); |
2188 | break; |
2189 | } |
2190 | |
2191 | default: |
2192 | DFG_CRASH(m_jit.graph(), node, "Bad use kind" ); |
2193 | } |
2194 | break; |
2195 | } |
2196 | |
2197 | case ValueNegate: |
2198 | compileValueNegate(node); |
2199 | break; |
2200 | |
2201 | case ValueAdd: |
2202 | compileValueAdd(node); |
2203 | break; |
2204 | |
2205 | case ValueSub: |
2206 | compileValueSub(node); |
2207 | break; |
2208 | |
2209 | case StrCat: { |
2210 | compileStrCat(node); |
2211 | break; |
2212 | } |
2213 | |
2214 | case ArithAdd: |
2215 | compileArithAdd(node); |
2216 | break; |
2217 | |
2218 | case ArithClz32: |
2219 | compileArithClz32(node); |
2220 | break; |
2221 | |
2222 | case MakeRope: |
2223 | compileMakeRope(node); |
2224 | break; |
2225 | |
2226 | case ArithSub: |
2227 | compileArithSub(node); |
2228 | break; |
2229 | |
2230 | case ArithNegate: |
2231 | compileArithNegate(node); |
2232 | break; |
2233 | |
2234 | case ArithMul: |
2235 | compileArithMul(node); |
2236 | break; |
2237 | |
2238 | case ValueMul: |
2239 | compileValueMul(node); |
2240 | break; |
2241 | |
2242 | case ValueDiv: { |
2243 | compileValueDiv(node); |
2244 | break; |
2245 | } |
2246 | |
2247 | case ArithDiv: { |
2248 | compileArithDiv(node); |
2249 | break; |
2250 | } |
2251 | |
2252 | case ValueMod: { |
2253 | compileValueMod(node); |
2254 | break; |
2255 | } |
2256 | |
2257 | case ArithMod: { |
2258 | compileArithMod(node); |
2259 | break; |
2260 | } |
2261 | |
2262 | case ArithAbs: |
2263 | compileArithAbs(node); |
2264 | break; |
2265 | |
2266 | case ArithMin: |
2267 | case ArithMax: { |
2268 | compileArithMinMax(node); |
2269 | break; |
2270 | } |
2271 | |
2272 | case ValuePow: |
2273 | compileValuePow(node); |
2274 | break; |
2275 | |
2276 | case ArithPow: |
2277 | compileArithPow(node); |
2278 | break; |
2279 | |
2280 | case ArithSqrt: |
2281 | compileArithSqrt(node); |
2282 | break; |
2283 | |
2284 | case ArithFRound: |
2285 | compileArithFRound(node); |
2286 | break; |
2287 | |
2288 | case ArithRandom: |
2289 | compileArithRandom(node); |
2290 | break; |
2291 | |
2292 | case ArithRound: |
2293 | case ArithFloor: |
2294 | case ArithCeil: |
2295 | case ArithTrunc: |
2296 | compileArithRounding(node); |
2297 | break; |
2298 | |
2299 | case ArithUnary: |
2300 | compileArithUnary(node); |
2301 | break; |
2302 | |
2303 | case LogicalNot: |
2304 | compileLogicalNot(node); |
2305 | break; |
2306 | |
2307 | case CompareLess: |
2308 | if (compare(node, JITCompiler::LessThan, JITCompiler::DoubleLessThan, operationCompareLess)) |
2309 | return; |
2310 | break; |
2311 | |
2312 | case CompareLessEq: |
2313 | if (compare(node, JITCompiler::LessThanOrEqual, JITCompiler::DoubleLessThanOrEqual, operationCompareLessEq)) |
2314 | return; |
2315 | break; |
2316 | |
2317 | case CompareGreater: |
2318 | if (compare(node, JITCompiler::GreaterThan, JITCompiler::DoubleGreaterThan, operationCompareGreater)) |
2319 | return; |
2320 | break; |
2321 | |
2322 | case CompareGreaterEq: |
2323 | if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq)) |
2324 | return; |
2325 | break; |
2326 | |
2327 | case CompareBelow: |
2328 | compileCompareUnsigned(node, JITCompiler::Below); |
2329 | break; |
2330 | |
2331 | case CompareBelowEq: |
2332 | compileCompareUnsigned(node, JITCompiler::BelowOrEqual); |
2333 | break; |
2334 | |
2335 | case CompareEq: |
2336 | if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq)) |
2337 | return; |
2338 | break; |
2339 | |
2340 | case CompareStrictEq: |
2341 | if (compileStrictEq(node)) |
2342 | return; |
2343 | break; |
2344 | |
2345 | case CompareEqPtr: |
2346 | compileCompareEqPtr(node); |
2347 | break; |
2348 | |
2349 | case SameValue: |
2350 | compileSameValue(node); |
2351 | break; |
2352 | |
2353 | case StringCharCodeAt: { |
2354 | compileGetCharCodeAt(node); |
2355 | break; |
2356 | } |
2357 | |
2358 | case StringCodePointAt: { |
2359 | compileStringCodePointAt(node); |
2360 | break; |
2361 | } |
2362 | |
2363 | case StringCharAt: { |
2364 | // Relies on StringCharAt node having same basic layout as GetByVal |
2365 | compileGetByValOnString(node); |
2366 | break; |
2367 | } |
2368 | |
2369 | case StringFromCharCode: { |
2370 | compileFromCharCode(node); |
2371 | break; |
2372 | } |
2373 | |
2374 | case CheckArray: { |
2375 | checkArray(node); |
2376 | break; |
2377 | } |
2378 | |
2379 | case Arrayify: |
2380 | case ArrayifyToStructure: { |
2381 | arrayify(node); |
2382 | break; |
2383 | } |
2384 | |
2385 | case GetByVal: { |
2386 | switch (node->arrayMode().type()) { |
2387 | case Array::AnyTypedArray: |
2388 | case Array::ForceExit: |
2389 | case Array::SelectUsingArguments: |
2390 | case Array::SelectUsingPredictions: |
2391 | case Array::Unprofiled: |
2392 | DFG_CRASH(m_jit.graph(), node, "Bad array mode type" ); |
2393 | break; |
2394 | case Array::Undecided: { |
2395 | SpeculateStrictInt32Operand index(this, m_graph.varArgChild(node, 1)); |
2396 | GPRTemporary result(this, Reuse, index); |
2397 | GPRReg indexGPR = index.gpr(); |
2398 | GPRReg resultGPR = result.gpr(); |
2399 | |
2400 | speculationCheck(OutOfBounds, JSValueRegs(), node, |
2401 | m_jit.branch32(MacroAssembler::LessThan, indexGPR, MacroAssembler::TrustedImm32(0))); |
2402 | |
2403 | use(m_graph.varArgChild(node, 0)); |
2404 | index.use(); |
2405 | |
2406 | m_jit.move(MacroAssembler::TrustedImm64(JSValue::ValueUndefined), resultGPR); |
2407 | jsValueResult(resultGPR, node, UseChildrenCalledExplicitly); |
2408 | break; |
2409 | } |
2410 | case Array::Generic: { |
            if (m_graph.m_slowGetByVal.contains(node)) {
                // This GetByVal was profiled as taking the slow path. Skip the inline
                // cache entirely; try the specialized object+string / object+symbol
                // compilers first, then fall back to a plain runtime call.
                if (m_graph.varArgChild(node, 0).useKind() == ObjectUse) {
                    if (m_graph.varArgChild(node, 1).useKind() == StringUse) {
                        compileGetByValForObjectWithString(node);
                        break;
                    }

                    if (m_graph.varArgChild(node, 1).useKind() == SymbolUse) {
                        compileGetByValForObjectWithSymbol(node);
                        break;
                    }
                }

                JSValueOperand base(this, m_graph.varArgChild(node, 0));
                JSValueOperand property(this, m_graph.varArgChild(node, 1));
                GPRReg baseGPR = base.gpr();
                GPRReg propertyGPR = property.gpr();

                // Fully generic lookup: flush live registers and call the runtime.
                flushRegisters();
                GPRFlushedCallResult result(this);
                callOperation(operationGetByVal, result.gpr(), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, propertyGPR);
                m_jit.exceptionCheck();

                jsValueResult(result.gpr(), node);
                break;
            }

            // Inline-cache path. Speculation is performed explicitly here
            // (ManualOperandSpeculation below) so the IC operates on raw JSValues.
            speculate(node, m_graph.varArgChild(node, 0));
            speculate(node, m_graph.varArgChild(node, 1));

            JSValueOperand base(this, m_graph.varArgChild(node, 0), ManualOperandSpeculation);
            JSValueOperand property(this, m_graph.varArgChild(node, 1), ManualOperandSpeculation);
            GPRTemporary result(this, Reuse, property);
            GPRReg baseGPR = base.gpr();
            GPRReg propertyGPR = property.gpr();
            GPRReg resultGPR = result.gpr();

            CodeOrigin codeOrigin = node->origin.semantic;
            CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
            RegisterSet usedRegisters = this->usedRegisters();

            JITCompiler::JumpList slowCases;
            // If the abstract interpreter cannot prove the base is a cell, send
            // non-cell bases to the slow path instead of crashing in the IC.
            if (!m_state.forNode(m_graph.varArgChild(node, 0)).isType(SpecCell))
                slowCases.append(m_jit.branchIfNotCell(baseGPR));

            JITGetByValGenerator gen(
                m_jit.codeBlock(), codeOrigin, callSite, usedRegisters,
                JSValueRegs(baseGPR), JSValueRegs(propertyGPR), JSValueRegs(resultGPR));

            // Seed the stub with statically-proven property-type facts so the IC
            // can specialize its fast paths (string / int32 / symbol keys).
            if (m_state.forNode(m_graph.varArgChild(node, 1)).isType(SpecString))
                gen.stubInfo()->propertyIsString = true;
            else if (m_state.forNode(m_graph.varArgChild(node, 1)).isType(SpecInt32Only))
                gen.stubInfo()->propertyIsInt32 = true;
            else if (m_state.forNode(m_graph.varArgChild(node, 1)).isType(SpecSymbol))
                gen.stubInfo()->propertyIsSymbol = true;

            gen.generateFastPath(m_jit);

            slowCases.append(gen.slowPathJump());

            // Slow path repatches the IC via operationGetByValOptimize.
            std::unique_ptr<SlowPathGenerator> slowPath = slowPathCall(
                slowCases, this, operationGetByValOptimize,
                resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), gen.stubInfo(), nullptr, baseGPR, propertyGPR);

            m_jit.addGetByVal(gen, slowPath.get());
            addSlowPathGenerator(WTFMove(slowPath));

            jsValueResult(resultGPR, node);
            break;
        }
        case Array::Int32:
        case Array::Contiguous: {
            if (node->arrayMode().isInBounds()) {
                // In-bounds mode: an out-of-bounds index is an OSR-exit, not a slow call,
                // so we never need the base register here.
                SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1));
                StorageOperand storage(this, m_graph.varArgChild(node, 2));

                GPRReg propertyReg = property.gpr();
                GPRReg storageReg = storage.gpr();

                if (!m_compileOkay)
                    return;

                // Unsigned compare doubles as a negative-index check.
                speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));

                GPRTemporary result(this);

                m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.gpr());
                if (node->arrayMode().isSaneChain()) {
                    // SaneChain: holes are proven to resolve to undefined through the
                    // prototype chain, so substitute undefined instead of exiting.
                    ASSERT(node->arrayMode().type() == Array::Contiguous);
                    JITCompiler::Jump notHole = m_jit.branchIfNotEmpty(result.gpr());
                    m_jit.move(TrustedImm64(JSValue::encode(jsUndefined())), result.gpr());
                    notHole.link(&m_jit);
                } else {
                    // Loading the empty value means we read a hole: OSR-exit.
                    speculationCheck(
                        LoadFromHole, JSValueRegs(), 0,
                        m_jit.branchIfEmpty(result.gpr()));
                }
                jsValueResult(result.gpr(), node, node->arrayMode().type() == Array::Int32 ? DataFormatJSInt32 : DataFormatJS);
                break;
            }

            // Out-of-bounds-tolerant mode: bounds misses and holes fall through to a
            // runtime call (which needs the base object).
            SpeculateCellOperand base(this, m_graph.varArgChild(node, 0));
            SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1));
            StorageOperand storage(this, m_graph.varArgChild(node, 2));

            GPRReg baseReg = base.gpr();
            GPRReg propertyReg = property.gpr();
            GPRReg storageReg = storage.gpr();

            if (!m_compileOkay)
                return;

            GPRTemporary result(this);
            GPRReg resultReg = result.gpr();

            MacroAssembler::JumpList slowCases;

            slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));

            m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
            slowCases.append(m_jit.branchIfEmpty(resultReg));

            addSlowPathGenerator(
                slowPathCall(
                    slowCases, this, operationGetByValObjectInt,
                    result.gpr(), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseReg, propertyReg));

            jsValueResult(resultReg, node);
            break;
        }
2541 | |
        case Array::Double: {
            // Double arrays use NaN (PNaN) as the hole marker; a loaded NaN means hole.
            if (node->arrayMode().isInBounds()) {
                SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1));
                StorageOperand storage(this, m_graph.varArgChild(node, 2));

                GPRReg propertyReg = property.gpr();
                GPRReg storageReg = storage.gpr();

                if (!m_compileOkay)
                    return;

                FPRTemporary result(this);
                FPRReg resultReg = result.fpr();

                speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));

                m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
                // Under SaneChain the NaN (hole) result is acceptable as-is;
                // otherwise a hole load must OSR-exit.
                if (!node->arrayMode().isSaneChain())
                    speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchIfNaN(resultReg));
                doubleResult(resultReg, node);
                break;
            }

            // Out-of-bounds-tolerant mode: holes and bounds misses go to the runtime.
            SpeculateCellOperand base(this, m_graph.varArgChild(node, 0));
            SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1));
            StorageOperand storage(this, m_graph.varArgChild(node, 2));

            GPRReg baseReg = base.gpr();
            GPRReg propertyReg = property.gpr();
            GPRReg storageReg = storage.gpr();

            if (!m_compileOkay)
                return;

            GPRTemporary result(this);
            FPRTemporary temp(this);
            GPRReg resultReg = result.gpr();
            FPRReg tempReg = temp.fpr();

            MacroAssembler::JumpList slowCases;

            slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));

            m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), tempReg);
            slowCases.append(m_jit.branchIfNaN(tempReg));
            // Fast path returns a boxed JSValue (not a raw double) because the slow
            // path produces a JSValue too.
            boxDouble(tempReg, resultReg);

            addSlowPathGenerator(
                slowPathCall(
                    slowCases, this, operationGetByValObjectInt,
                    result.gpr(), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseReg, propertyReg));

            jsValueResult(resultReg, node);
            break;
        }
2597 | |
        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage: {
            // ArrayStorage: bounds are checked against the vector length, and elements
            // live at vectorOffset() within the storage.
            if (node->arrayMode().isInBounds()) {
                SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1));
                StorageOperand storage(this, m_graph.varArgChild(node, 2));

                GPRReg propertyReg = property.gpr();
                GPRReg storageReg = storage.gpr();

                if (!m_compileOkay)
                    return;

                speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())));

                GPRTemporary result(this);
                m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()), result.gpr());
                // Empty slot means hole: OSR-exit.
                speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchIfEmpty(result.gpr()));

                jsValueResult(result.gpr(), node);
                break;
            }

            // Out-of-bounds-tolerant mode: misses and holes call into the runtime.
            SpeculateCellOperand base(this, m_graph.varArgChild(node, 0));
            SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1));
            StorageOperand storage(this, m_graph.varArgChild(node, 2));

            GPRReg baseReg = base.gpr();
            GPRReg propertyReg = property.gpr();
            GPRReg storageReg = storage.gpr();

            if (!m_compileOkay)
                return;

            GPRTemporary result(this);
            GPRReg resultReg = result.gpr();

            MacroAssembler::JumpList slowCases;

            slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())));

            m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()), resultReg);
            slowCases.append(m_jit.branchIfEmpty(resultReg));

            addSlowPathGenerator(
                slowPathCall(
                    slowCases, this, operationGetByValObjectInt,
                    result.gpr(), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseReg, propertyReg));

            jsValueResult(resultReg, node);
            break;
        }
        // Remaining GetByVal array modes are fully delegated to helpers.
        case Array::String:
            compileGetByValOnString(node);
            break;
        case Array::DirectArguments:
            compileGetByValOnDirectArguments(node);
            break;
        case Array::ScopedArguments:
            compileGetByValOnScopedArguments(node);
            break;
        case Array::Int8Array:
        case Array::Int16Array:
        case Array::Int32Array:
        case Array::Uint8Array:
        case Array::Uint8ClampedArray:
        case Array::Uint16Array:
        case Array::Uint32Array:
        case Array::Float32Array:
        case Array::Float64Array: {
            // All typed-array flavors funnel into the int/float typed-array helpers.
            TypedArrayType type = node->arrayMode().typedArrayType();
            if (isInt(type))
                compileGetByValOnIntTypedArray(node, type);
            else
                compileGetByValOnFloatTypedArray(node, type);
        } } // First brace closes the typed-array case; second closes the array-mode switch.
        break;
    }
2675 | |
    case GetByValWithThis: {
        // get-by-val with an explicit receiver (e.g. super[expr]); fully delegated.
        compileGetByValWithThis(node);
        break;
    }
2680 | |
    case PutByValDirect:
    case PutByVal:
    case PutByValAlias: {
        // Children: 0 = base, 1 = property, 2 = value, 3 = butterfly storage.
        Edge child1 = m_jit.graph().varArgChild(node, 0);
        Edge child2 = m_jit.graph().varArgChild(node, 1);
        Edge child3 = m_jit.graph().varArgChild(node, 2);
        Edge child4 = m_jit.graph().varArgChild(node, 3);

        ArrayMode arrayMode = node->arrayMode().modeForPut();
        bool alreadyHandled = false;

        switch (arrayMode.type()) {
        case Array::SelectUsingPredictions:
        case Array::ForceExit:
            // These modes must have been resolved before codegen.
            DFG_CRASH(m_jit.graph(), node, "Bad array mode type" );
            break;
        case Array::Generic: {
            DFG_ASSERT(m_jit.graph(), node, node->op() == PutByVal || node->op() == PutByValDirect, node->op());

            // Try the specialized cell+string / cell+symbol compilers first.
            if (child1.useKind() == CellUse) {
                if (child2.useKind() == StringUse) {
                    compilePutByValForCellWithString(node, child1, child2, child3);
                    alreadyHandled = true;
                    break;
                }

                if (child2.useKind() == SymbolUse) {
                    compilePutByValForCellWithSymbol(node, child1, child2, child3);
                    alreadyHandled = true;
                    break;
                }
            }

            // Fully generic put: call the runtime, choosing the operation by
            // direct-ness and the caller's strictness.
            JSValueOperand arg1(this, child1);
            JSValueOperand arg2(this, child2);
            JSValueOperand arg3(this, child3);
            GPRReg arg1GPR = arg1.gpr();
            GPRReg arg2GPR = arg2.gpr();
            GPRReg arg3GPR = arg3.gpr();
            flushRegisters();
            if (node->op() == PutByValDirect)
                callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), arg1GPR, arg2GPR, arg3GPR);
            else
                callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), arg1GPR, arg2GPR, arg3GPR);
            m_jit.exceptionCheck();

            noResult(node);
            alreadyHandled = true;
            break;
        }
        default:
            break;
        }

        if (alreadyHandled)
            break;

        // Specialized indexed-storage paths share a cell base and int32 index.
        SpeculateCellOperand base(this, child1);
        SpeculateStrictInt32Operand property(this, child2);

        GPRReg baseReg = base.gpr();
        GPRReg propertyReg = property.gpr();

        switch (arrayMode.type()) {
        case Array::Int32:
        case Array::Contiguous: {
            JSValueOperand value(this, child3, ManualOperandSpeculation);

            GPRReg valueReg = value.gpr();

            if (!m_compileOkay)
                return;

            // Int32 arrays only accept int32 values; anything else OSR-exits.
            if (arrayMode.type() == Array::Int32) {
                DFG_TYPE_CHECK(
                    JSValueRegs(valueReg), child3, SpecInt32Only,
                    m_jit.branchIfNotInt32(valueReg));
            }

            StorageOperand storage(this, child4);
            GPRReg storageReg = storage.gpr();

            if (node->op() == PutByValAlias) {
                // Alias: bounds were already checked by a dominating access.
                // Store the value to the array.
                GPRReg propertyReg = property.gpr();
                GPRReg valueReg = value.gpr();
                m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));

                noResult(node);
                break;
            }

            GPRTemporary temporary;
            GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);

            MacroAssembler::Jump slowCase;

            if (arrayMode.isInBounds()) {
                // In-bounds mode: an out-of-bounds store is an OSR-exit.
                speculationCheck(
                    OutOfBounds, JSValueRegs(), 0,
                    m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
            } else {
                // Growing store: within vector capacity we bump publicLength
                // ourselves; beyond capacity we take the slow path.
                MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));

                slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));

                if (!arrayMode.isOutOfBounds())
                    speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);

                m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
                m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));

                inBounds.link(&m_jit);
            }

            m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));

            // Operands are used explicitly because the slow path needs them alive.
            base.use();
            property.use();
            value.use();
            storage.use();

            if (arrayMode.isOutOfBounds()) {
                addSlowPathGenerator(slowPathCall(
                    slowCase, this,
                    m_jit.isStrictModeFor(node->origin.semantic)
                        ? (node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsStrict)
                        : (node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsNonStrict : operationPutByValBeyondArrayBoundsNonStrict),
                    NoResult, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseReg, propertyReg, valueReg));
            }

            noResult(node, UseChildrenCalledExplicitly);
            break;
        }

        case Array::Double: {
            compileDoublePutByVal(node, base, property);
            break;
        }

        case Array::ArrayStorage:
        case Array::SlowPutArrayStorage: {
            JSValueOperand value(this, child3);

            GPRReg valueReg = value.gpr();

            if (!m_compileOkay)
                return;

            StorageOperand storage(this, child4);
            GPRReg storageReg = storage.gpr();

            if (node->op() == PutByValAlias) {
                // Alias: bounds already checked by a dominating access.
                // Store the value to the array.
                GPRReg propertyReg = property.gpr();
                GPRReg valueReg = value.gpr();
                m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()));

                noResult(node);
                break;
            }

            GPRTemporary temporary;
            GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);

            MacroAssembler::JumpList slowCases;

            MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
            if (!arrayMode.isOutOfBounds())
                speculationCheck(OutOfBounds, JSValueRegs(), 0, beyondArrayBounds);
            else
                slowCases.append(beyondArrayBounds);

            // Check if we're writing to a hole; if so increment m_numValuesInVector.
            if (arrayMode.isInBounds()) {
                speculationCheck(
                    StoreToHole, JSValueRegs(), 0,
                    m_jit.branchTest64(MacroAssembler::Zero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset())));
            } else {
                MacroAssembler::Jump notHoleValue = m_jit.branchTest64(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()));
                if (arrayMode.isSlowPut()) {
                    // This is sort of strange. If we wanted to optimize this code path, we would invert
                    // the above branch. But it's simply not worth it since this only happens if we're
                    // already having a bad time.
                    slowCases.append(m_jit.jump());
                } else {
                    m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset()));

                    // If we're writing to a hole we might be growing the array;
                    MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
                    m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
                    m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));

                    lengthDoesNotNeedUpdate.link(&m_jit);
                }
                notHoleValue.link(&m_jit);
            }

            // Store the value to the array.
            m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()));

            // Operands are used explicitly because the slow path needs them alive.
            base.use();
            property.use();
            value.use();
            storage.use();

            if (!slowCases.empty()) {
                addSlowPathGenerator(slowPathCall(
                    slowCases, this,
                    m_jit.isStrictModeFor(node->origin.semantic)
                        ? (node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsStrict)
                        : (node->op() == PutByValDirect ? operationPutByValDirectBeyondArrayBoundsNonStrict : operationPutByValBeyondArrayBoundsNonStrict),
                    NoResult, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseReg, propertyReg, valueReg));
            }

            noResult(node, UseChildrenCalledExplicitly);
            break;
        }

        case Array::Int8Array:
        case Array::Int16Array:
        case Array::Int32Array:
        case Array::Uint8Array:
        case Array::Uint8ClampedArray:
        case Array::Uint16Array:
        case Array::Uint32Array:
        case Array::Float32Array:
        case Array::Float64Array: {
            // All typed-array flavors funnel into the int/float typed-array helpers.
            TypedArrayType type = arrayMode.typedArrayType();
            if (isInt(type))
                compilePutByValForIntTypedArray(base.gpr(), property.gpr(), node, type);
            else
                compilePutByValForFloatTypedArray(base.gpr(), property.gpr(), node, type);
            break;
        }

        // These modes are impossible for a put after modeForPut().
        case Array::AnyTypedArray:
        case Array::String:
        case Array::DirectArguments:
        case Array::ForceExit:
        case Array::Generic:
        case Array::ScopedArguments:
        case Array::SelectUsingArguments:
        case Array::SelectUsingPredictions:
        case Array::Undecided:
        case Array::Unprofiled:
            RELEASE_ASSERT_NOT_REACHED();
        }
        break;
    }
2931 | |
2932 | case AtomicsAdd: |
2933 | case AtomicsAnd: |
2934 | case AtomicsCompareExchange: |
2935 | case AtomicsExchange: |
2936 | case AtomicsLoad: |
2937 | case AtomicsOr: |
2938 | case AtomicsStore: |
2939 | case AtomicsSub: |
2940 | case AtomicsXor: { |
2941 | unsigned = numExtraAtomicsArgs(node->op()); |
2942 | Edge baseEdge = m_jit.graph().child(node, 0); |
2943 | Edge indexEdge = m_jit.graph().child(node, 1); |
2944 | Edge argEdges[maxNumExtraAtomicsArgs]; |
2945 | for (unsigned i = numExtraArgs; i--;) |
2946 | argEdges[i] = m_jit.graph().child(node, 2 + i); |
2947 | Edge storageEdge = m_jit.graph().child(node, 2 + numExtraArgs); |
2948 | |
2949 | GPRReg baseGPR; |
2950 | GPRReg indexGPR; |
2951 | GPRReg argGPRs[2]; |
2952 | GPRReg resultGPR; |
2953 | |
2954 | auto callSlowPath = [&] () { |
2955 | auto globalObjectImmPtr = TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)); |
2956 | switch (node->op()) { |
2957 | case AtomicsAdd: |
2958 | callOperation(operationAtomicsAdd, resultGPR, globalObjectImmPtr, baseGPR, indexGPR, argGPRs[0]); |
2959 | break; |
2960 | case AtomicsAnd: |
2961 | callOperation(operationAtomicsAnd, resultGPR, globalObjectImmPtr, baseGPR, indexGPR, argGPRs[0]); |
2962 | break; |
2963 | case AtomicsCompareExchange: |
2964 | callOperation(operationAtomicsCompareExchange, resultGPR, globalObjectImmPtr, baseGPR, indexGPR, argGPRs[0], argGPRs[1]); |
2965 | break; |
2966 | case AtomicsExchange: |
2967 | callOperation(operationAtomicsExchange, resultGPR, globalObjectImmPtr, baseGPR, indexGPR, argGPRs[0]); |
2968 | break; |
2969 | case AtomicsLoad: |
2970 | callOperation(operationAtomicsLoad, resultGPR, globalObjectImmPtr, baseGPR, indexGPR); |
2971 | break; |
2972 | case AtomicsOr: |
2973 | callOperation(operationAtomicsOr, resultGPR, globalObjectImmPtr, baseGPR, indexGPR, argGPRs[0]); |
2974 | break; |
2975 | case AtomicsStore: |
2976 | callOperation(operationAtomicsStore, resultGPR, globalObjectImmPtr, baseGPR, indexGPR, argGPRs[0]); |
2977 | break; |
2978 | case AtomicsSub: |
2979 | callOperation(operationAtomicsSub, resultGPR, globalObjectImmPtr, baseGPR, indexGPR, argGPRs[0]); |
2980 | break; |
2981 | case AtomicsXor: |
2982 | callOperation(operationAtomicsXor, resultGPR, globalObjectImmPtr, baseGPR, indexGPR, argGPRs[0]); |
2983 | break; |
2984 | default: |
2985 | RELEASE_ASSERT_NOT_REACHED(); |
2986 | break; |
2987 | } |
2988 | }; |
2989 | |
2990 | if (!storageEdge) { |
2991 | // We are in generic mode! |
2992 | JSValueOperand base(this, baseEdge); |
2993 | JSValueOperand index(this, indexEdge); |
2994 | Optional<JSValueOperand> args[2]; |
2995 | baseGPR = base.gpr(); |
2996 | indexGPR = index.gpr(); |
2997 | for (unsigned i = numExtraArgs; i--;) { |
2998 | args[i].emplace(this, argEdges[i]); |
2999 | argGPRs[i] = args[i]->gpr(); |
3000 | } |
3001 | |
3002 | flushRegisters(); |
3003 | GPRFlushedCallResult result(this); |
3004 | resultGPR = result.gpr(); |
3005 | callSlowPath(); |
3006 | m_jit.exceptionCheck(); |
3007 | |
3008 | jsValueResult(resultGPR, node); |
3009 | break; |
3010 | } |
3011 | |
3012 | TypedArrayType type = node->arrayMode().typedArrayType(); |
3013 | |
3014 | SpeculateCellOperand base(this, baseEdge); |
3015 | SpeculateStrictInt32Operand index(this, indexEdge); |
3016 | |
3017 | baseGPR = base.gpr(); |
3018 | indexGPR = index.gpr(); |
3019 | |
3020 | emitTypedArrayBoundsCheck(node, baseGPR, indexGPR); |
3021 | |
3022 | GPRTemporary args[2]; |
3023 | |
3024 | JITCompiler::JumpList slowPathCases; |
3025 | |
3026 | bool ok = true; |
3027 | for (unsigned i = numExtraArgs; i--;) { |
3028 | if (!getIntTypedArrayStoreOperand(args[i], indexGPR, argEdges[i], slowPathCases)) { |
3029 | noResult(node); |
3030 | ok = false; |
3031 | } |
3032 | argGPRs[i] = args[i].gpr(); |
3033 | } |
3034 | if (!ok) |
3035 | break; |
3036 | |
3037 | StorageOperand storage(this, storageEdge); |
3038 | GPRTemporary oldValue(this); |
3039 | GPRTemporary result(this); |
3040 | GPRTemporary newValue(this); |
3041 | GPRReg storageGPR = storage.gpr(); |
3042 | GPRReg oldValueGPR = oldValue.gpr(); |
3043 | resultGPR = result.gpr(); |
3044 | GPRReg newValueGPR = newValue.gpr(); |
3045 | |
3046 | // FIXME: It shouldn't be necessary to nop-pad between register allocation and a jump label. |
3047 | // https://bugs.webkit.org/show_bug.cgi?id=170974 |
3048 | m_jit.nop(); |
3049 | |
3050 | JITCompiler::Label loop = m_jit.label(); |
3051 | |
3052 | loadFromIntTypedArray(storageGPR, indexGPR, oldValueGPR, type); |
3053 | m_jit.move(oldValueGPR, newValueGPR); |
3054 | m_jit.move(oldValueGPR, resultGPR); |
3055 | |
3056 | switch (node->op()) { |
3057 | case AtomicsAdd: |
3058 | m_jit.add32(argGPRs[0], newValueGPR); |
3059 | break; |
3060 | case AtomicsAnd: |
3061 | m_jit.and32(argGPRs[0], newValueGPR); |
3062 | break; |
3063 | case AtomicsCompareExchange: { |
3064 | switch (elementSize(type)) { |
3065 | case 1: |
3066 | if (isSigned(type)) |
3067 | m_jit.signExtend8To32(argGPRs[0], argGPRs[0]); |
3068 | else |
3069 | m_jit.and32(TrustedImm32(0xff), argGPRs[0]); |
3070 | break; |
3071 | case 2: |
3072 | if (isSigned(type)) |
3073 | m_jit.signExtend16To32(argGPRs[0], argGPRs[0]); |
3074 | else |
3075 | m_jit.and32(TrustedImm32(0xffff), argGPRs[0]); |
3076 | break; |
3077 | case 4: |
3078 | break; |
3079 | default: |
3080 | RELEASE_ASSERT_NOT_REACHED(); |
3081 | break; |
3082 | } |
3083 | JITCompiler::Jump fail = m_jit.branch32(JITCompiler::NotEqual, oldValueGPR, argGPRs[0]); |
3084 | m_jit.move(argGPRs[1], newValueGPR); |
3085 | fail.link(&m_jit); |
3086 | break; |
3087 | } |
3088 | case AtomicsExchange: |
3089 | m_jit.move(argGPRs[0], newValueGPR); |
3090 | break; |
3091 | case AtomicsLoad: |
3092 | break; |
3093 | case AtomicsOr: |
3094 | m_jit.or32(argGPRs[0], newValueGPR); |
3095 | break; |
3096 | case AtomicsStore: |
3097 | m_jit.move(argGPRs[0], newValueGPR); |
3098 | m_jit.move(argGPRs[0], resultGPR); |
3099 | break; |
3100 | case AtomicsSub: |
3101 | m_jit.sub32(argGPRs[0], newValueGPR); |
3102 | break; |
3103 | case AtomicsXor: |
3104 | m_jit.xor32(argGPRs[0], newValueGPR); |
3105 | break; |
3106 | default: |
3107 | RELEASE_ASSERT_NOT_REACHED(); |
3108 | break; |
3109 | } |
3110 | |
3111 | JITCompiler::JumpList success; |
3112 | switch (elementSize(type)) { |
3113 | case 1: |
3114 | success = m_jit.branchAtomicWeakCAS8(JITCompiler::Success, oldValueGPR, newValueGPR, JITCompiler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesOne)); |
3115 | break; |
3116 | case 2: |
3117 | success = m_jit.branchAtomicWeakCAS16(JITCompiler::Success, oldValueGPR, newValueGPR, JITCompiler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesTwo)); |
3118 | break; |
3119 | case 4: |
3120 | success = m_jit.branchAtomicWeakCAS32(JITCompiler::Success, oldValueGPR, newValueGPR, JITCompiler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesFour)); |
3121 | break; |
3122 | default: |
3123 | RELEASE_ASSERT_NOT_REACHED(); |
3124 | break; |
3125 | } |
3126 | m_jit.jump().linkTo(loop, &m_jit); |
3127 | |
3128 | if (!slowPathCases.empty()) { |
3129 | slowPathCases.link(&m_jit); |
3130 | silentSpillAllRegisters(resultGPR); |
3131 | // Since we spilled, we can do things to registers. |
3132 | m_jit.boxCell(baseGPR, JSValueRegs(baseGPR)); |
3133 | m_jit.boxInt32(indexGPR, JSValueRegs(indexGPR)); |
3134 | for (unsigned i = numExtraArgs; i--;) |
3135 | m_jit.boxInt32(argGPRs[i], JSValueRegs(argGPRs[i])); |
3136 | callSlowPath(); |
3137 | silentFillAllRegisters(); |
3138 | m_jit.exceptionCheck(); |
3139 | } |
3140 | |
3141 | success.link(&m_jit); |
3142 | setIntTypedArrayLoadResult(node, resultGPR, type); |
3143 | break; |
3144 | } |
3145 | |
    case AtomicsIsLockFree: {
        if (node->child1().useKind() != Int32Use) {
            // Non-int32 operand: defer entirely to the runtime.
            JSValueOperand operand(this, node->child1());
            GPRReg operandGPR = operand.gpr();
            flushRegisters();
            GPRFlushedCallResult result(this);
            GPRReg resultGPR = result.gpr();
            callOperation(operationAtomicsIsLockFree, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), operandGPR);
            m_jit.exceptionCheck();
            jsValueResult(resultGPR, node);
            break;
        }

        // Int32 fast path: byte sizes 1, 2, and 4 are lock-free; everything else is not.
        SpeculateInt32Operand operand(this, node->child1());
        GPRTemporary result(this);
        GPRReg operandGPR = operand.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.move(TrustedImm32(JSValue::ValueTrue), resultGPR);
        JITCompiler::JumpList done;
        done.append(m_jit.branch32(JITCompiler::Equal, operandGPR, TrustedImm32(4)));
        done.append(m_jit.branch32(JITCompiler::Equal, operandGPR, TrustedImm32(1)));
        done.append(m_jit.branch32(JITCompiler::Equal, operandGPR, TrustedImm32(2)));
        m_jit.move(TrustedImm32(JSValue::ValueFalse), resultGPR);
        done.link(&m_jit);
        jsValueResult(resultGPR, node);
        break;
    }
3173 | |
    // The following ops are fully delegated to their dedicated compile* helpers.
    case RegExpExec: {
        compileRegExpExec(node);
        break;
    }

    case RegExpExecNonGlobalOrSticky: {
        compileRegExpExecNonGlobalOrSticky(node);
        break;
    }

    case RegExpMatchFastGlobal: {
        compileRegExpMatchFastGlobal(node);
        break;
    }

    case RegExpTest: {
        compileRegExpTest(node);
        break;
    }

    case RegExpMatchFast: {
        compileRegExpMatchFast(node);
        break;
    }

    case StringReplace:
    case StringReplaceRegExp: {
        compileStringReplace(node);
        break;
    }

    case GetRegExpObjectLastIndex: {
        compileGetRegExpObjectLastIndex(node);
        break;
    }

    case SetRegExpObjectLastIndex: {
        compileSetRegExpObjectLastIndex(node);
        break;
    }

    case RecordRegExpCachedResult: {
        compileRecordRegExpCachedResult(node);
        break;
    }

    case ArrayPush: {
        compileArrayPush(node);
        break;
    }

    case ArraySlice: {
        compileArraySlice(node);
        break;
    }

    case ArrayIndexOf: {
        compileArrayIndexOf(node);
        break;
    }
3234 | |
    case ArrayPop: {
        ASSERT(node->arrayMode().isJSArray());

        SpeculateCellOperand base(this, node->child1());
        StorageOperand storage(this, node->child2());
        GPRTemporary value(this);
        GPRTemporary storageLength(this);
        FPRTemporary temp(this); // This is kind of lame, since we don't always need it. I'm relying on the fact that we don't have FPR pressure, especially in code that uses pop().

        GPRReg baseGPR = base.gpr();
        GPRReg storageGPR = storage.gpr();
        GPRReg valueGPR = value.gpr();
        GPRReg storageLengthGPR = storageLength.gpr();
        FPRReg tempFPR = temp.fpr();

        switch (node->arrayMode().type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous: {
            m_jit.load32(
                MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
            // Popping an empty array yields undefined (handled by a slow-path move).
            MacroAssembler::Jump undefinedCase =
                m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR);
            m_jit.sub32(TrustedImm32(1), storageLengthGPR);
            m_jit.store32(
                storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
            MacroAssembler::Jump slowCase;
            if (node->arrayMode().type() == Array::Double) {
                m_jit.loadDouble(
                    MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight),
                    tempFPR);
                // FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
                // length and the new length.
                // Re-mark the popped slot as a hole (PNaN is the double-array hole marker).
                m_jit.store64(
                    MacroAssembler::TrustedImm64(bitwise_cast<int64_t>(PNaN)), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
                slowCase = m_jit.branchIfNaN(tempFPR);
                boxDouble(tempFPR, valueGPR);
            } else {
                m_jit.load64(
                    MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight),
                    valueGPR);
                // FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
                // length and the new length.
                // Re-mark the popped slot as a hole (empty value).
                m_jit.store64(
                    MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
                slowCase = m_jit.branchIfEmpty(valueGPR);
            }

            addSlowPathGenerator(
                slowPathMove(
                    undefinedCase, this,
                    MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR));
            // Popping a hole must consult the prototype chain and restore the length.
            addSlowPathGenerator(
                slowPathCall(
                    slowCase, this, operationArrayPopAndRecoverLength, valueGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR));

            // We can't know for sure that the result is an int because of the slow paths. :-/
            jsValueResult(valueGPR, node);
            break;
        }

        case Array::ArrayStorage: {
            m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);

            // Popping an empty array yields undefined (handled by a slow-path move).
            JITCompiler::Jump undefinedCase =
                m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR);

            m_jit.sub32(TrustedImm32(1), storageLengthGPR);

            JITCompiler::JumpList slowCases;
            slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())));

            m_jit.load64(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()), valueGPR);
            slowCases.append(m_jit.branchIfEmpty(valueGPR));

            m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));

            // Clear the popped slot back to the empty (hole) value.
            m_jit.store64(MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()));
3313 | m_jit.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); |
3314 | |
3315 | addSlowPathGenerator( |
3316 | slowPathMove( |
3317 | undefinedCase, this, |
3318 | MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR)); |
3319 | |
3320 | addSlowPathGenerator( |
3321 | slowPathCall( |
3322 | slowCases, this, operationArrayPop, valueGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR)); |
3323 | |
3324 | jsValueResult(valueGPR, node); |
3325 | break; |
3326 | } |
3327 | |
3328 | default: |
3329 | CRASH(); |
3330 | break; |
3331 | } |
3332 | break; |
3333 | } |
3334 | |
    case DFG::Jump: {
        // Unconditional branch to the node's target basic block.
        jump(node->targetBlock());
        noResult(node);
        break;
    }

    case Branch:
        emitBranch(node);
        break;

    case Switch:
        emitSwitch(node);
        break;

    case Return: {
        // The epilogue sequence below assumes these three registers are
        // pairwise distinct; assert it rather than trust the GPRInfo layout.
        ASSERT(GPRInfo::callFrameRegister != GPRInfo::regT1);
        ASSERT(GPRInfo::regT1 != GPRInfo::returnValueGPR);
        ASSERT(GPRInfo::returnValueGPR != GPRInfo::callFrameRegister);

        // Return the result in returnValueGPR.
        JSValueOperand op1(this, node->child1());
        m_jit.move(op1.gpr(), GPRInfo::returnValueGPR);

        // Restore callee saves before tearing down the frame and returning.
        m_jit.emitRestoreCalleeSaves();
        m_jit.emitFunctionEpilogue();
        m_jit.ret();

        noResult(node);
        break;
    }

    case Throw: {
        compileThrow(node);
        break;
    }

    case ThrowStaticError: {
        compileThrowStaticError(node);
        break;
    }
3375 | |
    case BooleanToNumber: {
        // Converts a boolean to the int32 0 or 1. The BooleanUse path proves
        // boolean-ness with a type check; the UntypedUse path passes
        // non-booleans through unchanged as a JSValue.
        switch (node->child1().useKind()) {
        case BooleanUse: {
            JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
            GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).

            // XOR with ValueFalse maps false -> 0 and true -> 1. If any other
            // bits remain set, the input was not a boolean: speculation check.
            m_jit.move(value.gpr(), result.gpr());
            m_jit.xor64(TrustedImm32(JSValue::ValueFalse), result.gpr());
            DFG_TYPE_CHECK(
                JSValueRegs(value.gpr()), node->child1(), SpecBoolean, m_jit.branchTest64(
                    JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))));

            int32Result(result.gpr(), node);
            break;
        }

        case UntypedUse: {
            JSValueOperand value(this, node->child1());
            GPRTemporary result(this);

            if (!m_interpreter.needsTypeCheck(node->child1(), SpecBoolInt32 | SpecBoolean)) {
                // Value is proven to be a boolean or the int32 0/1; in the
                // JSVALUE64 encoding the answer is the low bit in either case.
                m_jit.move(value.gpr(), result.gpr());
                m_jit.and32(TrustedImm32(1), result.gpr());
                int32Result(result.gpr(), node);
                break;
            }

            // General path: detect booleans via the XOR-with-ValueFalse trick.
            m_jit.move(value.gpr(), result.gpr());
            m_jit.xor64(TrustedImm32(JSValue::ValueFalse), result.gpr());
            JITCompiler::Jump isBoolean = m_jit.branchTest64(
                JITCompiler::Zero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1)));
            // Not a boolean: the result is the input value, unmodified.
            m_jit.move(value.gpr(), result.gpr());
            JITCompiler::Jump done = m_jit.jump();
            isBoolean.link(&m_jit);
            // Boolean: result already holds 0/1; box it as an int32 JSValue.
            m_jit.or64(GPRInfo::numberTagRegister, result.gpr());
            done.link(&m_jit);

            jsValueResult(result.gpr(), node);
            break;
        }

        default:
            DFG_CRASH(m_jit.graph(), node, "Bad use kind");
            break;
        }
        break;
    }

    case ToPrimitive: {
        compileToPrimitive(node);
        break;
    }

    case ToNumber: {
        JSValueOperand argument(this, node->child1());
        GPRTemporary result(this, Reuse, argument);

        GPRReg argumentGPR = argument.gpr();
        GPRReg resultGPR = result.gpr();

        argument.use();

        // We have several attempts to remove ToNumber. But ToNumber still exists.
        // It means that converting non-numbers to numbers by this ToNumber is not rare.
        // Instead of the slow path generator, we emit callOperation here.
        if (!(m_state.forNode(node->child1()).m_type & SpecBytecodeNumber)) {
            // Proven non-number: always call the runtime conversion.
            flushRegisters();
            callOperation(operationToNumber, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), argumentGPR);
            m_jit.exceptionCheck();
        } else {
            // Fast path: numbers convert to themselves. Non-numbers spill,
            // call out, and refill registers inline.
            MacroAssembler::Jump notNumber = m_jit.branchIfNotNumber(argumentGPR);
            m_jit.move(argumentGPR, resultGPR);
            MacroAssembler::Jump done = m_jit.jump();

            notNumber.link(&m_jit);
            silentSpillAllRegisters(resultGPR);
            callOperation(operationToNumber, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), argumentGPR);
            silentFillAllRegisters();
            m_jit.exceptionCheck();

            done.link(&m_jit);
        }

        jsValueResult(resultGPR, node, UseChildrenCalledExplicitly);
        break;
    }
3462 | |
3463 | case ToNumeric: { |
3464 | compileToNumeric(node); |
3465 | break; |
3466 | } |
3467 | |
3468 | case ToString: |
3469 | case CallStringConstructor: |
3470 | case StringValueOf: { |
3471 | compileToStringOrCallStringConstructorOrStringValueOf(node); |
3472 | break; |
3473 | } |
3474 | |
3475 | case NewStringObject: { |
3476 | compileNewStringObject(node); |
3477 | break; |
3478 | } |
3479 | |
3480 | case NewSymbol: { |
3481 | compileNewSymbol(node); |
3482 | break; |
3483 | } |
3484 | |
3485 | case NewArray: { |
3486 | compileNewArray(node); |
3487 | break; |
3488 | } |
3489 | |
3490 | case NewArrayWithSpread: { |
3491 | compileNewArrayWithSpread(node); |
3492 | break; |
3493 | } |
3494 | |
3495 | case Spread: { |
3496 | compileSpread(node); |
3497 | break; |
3498 | } |
3499 | |
3500 | case NewArrayWithSize: { |
3501 | compileNewArrayWithSize(node); |
3502 | break; |
3503 | } |
3504 | |
3505 | case NewArrayBuffer: { |
3506 | compileNewArrayBuffer(node); |
3507 | break; |
3508 | } |
3509 | |
3510 | case NewTypedArray: { |
3511 | compileNewTypedArray(node); |
3512 | break; |
3513 | } |
3514 | |
3515 | case NewRegexp: { |
3516 | compileNewRegexp(node); |
3517 | break; |
3518 | } |
3519 | |
3520 | case ToObject: |
3521 | case CallObjectConstructor: { |
3522 | compileToObjectOrCallObjectConstructor(node); |
3523 | break; |
3524 | } |
3525 | |
3526 | case ToThis: { |
3527 | compileToThis(node); |
3528 | break; |
3529 | } |
3530 | |
3531 | case ObjectCreate: { |
3532 | compileObjectCreate(node); |
3533 | break; |
3534 | } |
3535 | |
3536 | case ObjectKeys: { |
3537 | compileObjectKeys(node); |
3538 | break; |
3539 | } |
3540 | |
3541 | case CreateThis: { |
3542 | compileCreateThis(node); |
3543 | break; |
3544 | } |
3545 | |
3546 | case CreatePromise: { |
3547 | compileCreatePromise(node); |
3548 | break; |
3549 | } |
3550 | |
3551 | case CreateGenerator: { |
3552 | compileCreateGenerator(node); |
3553 | break; |
3554 | } |
3555 | |
3556 | case CreateAsyncGenerator: { |
3557 | compileCreateAsyncGenerator(node); |
3558 | break; |
3559 | } |
3560 | |
3561 | case NewObject: { |
3562 | compileNewObject(node); |
3563 | break; |
3564 | } |
3565 | |
3566 | case NewPromise: { |
3567 | compileNewPromise(node); |
3568 | break; |
3569 | } |
3570 | |
3571 | case NewGenerator: { |
3572 | compileNewGenerator(node); |
3573 | break; |
3574 | } |
3575 | |
3576 | case NewAsyncGenerator: { |
3577 | compileNewAsyncGenerator(node); |
3578 | break; |
3579 | } |
3580 | |
3581 | case GetCallee: { |
3582 | compileGetCallee(node); |
3583 | break; |
3584 | } |
3585 | |
3586 | case SetCallee: { |
3587 | compileSetCallee(node); |
3588 | break; |
3589 | } |
3590 | |
3591 | case GetArgumentCountIncludingThis: { |
3592 | compileGetArgumentCountIncludingThis(node); |
3593 | break; |
3594 | } |
3595 | |
3596 | case SetArgumentCountIncludingThis: |
3597 | compileSetArgumentCountIncludingThis(node); |
3598 | break; |
3599 | |
3600 | case GetRestLength: { |
3601 | compileGetRestLength(node); |
3602 | break; |
3603 | } |
3604 | |
3605 | case GetScope: |
3606 | compileGetScope(node); |
3607 | break; |
3608 | |
3609 | case SkipScope: |
3610 | compileSkipScope(node); |
3611 | break; |
3612 | |
3613 | case GetGlobalObject: |
3614 | compileGetGlobalObject(node); |
3615 | break; |
3616 | |
3617 | case GetGlobalThis: |
3618 | compileGetGlobalThis(node); |
3619 | break; |
3620 | |
3621 | case GetClosureVar: { |
3622 | compileGetClosureVar(node); |
3623 | break; |
3624 | } |
3625 | case PutClosureVar: { |
3626 | compilePutClosureVar(node); |
3627 | break; |
3628 | } |
3629 | |
3630 | case GetInternalField: { |
3631 | compileGetInternalField(node); |
3632 | break; |
3633 | } |
3634 | |
3635 | case PutInternalField: { |
3636 | compilePutInternalField(node); |
3637 | break; |
3638 | } |
3639 | |
3640 | case TryGetById: { |
3641 | compileGetById(node, AccessType::TryGetById); |
3642 | break; |
3643 | } |
3644 | |
3645 | case GetByIdDirect: { |
3646 | compileGetById(node, AccessType::GetByIdDirect); |
3647 | break; |
3648 | } |
3649 | |
3650 | case GetByIdDirectFlush: { |
3651 | compileGetByIdFlush(node, AccessType::GetByIdDirect); |
3652 | break; |
3653 | } |
3654 | |
3655 | case GetById: { |
3656 | compileGetById(node, AccessType::GetById); |
3657 | break; |
3658 | } |
3659 | |
3660 | case GetByIdFlush: { |
3661 | compileGetByIdFlush(node, AccessType::GetById); |
3662 | break; |
3663 | } |
3664 | |
    case GetByIdWithThis: {
        // Property load with an explicit |this| value, via the inline-cache
        // helper. Two shapes: when both base and thisValue are proven cells we
        // need no inline cell checks; otherwise the not-cell checks are handed
        // to the IC as its slow-path entry jumps.
        if (node->child1().useKind() == CellUse && node->child2().useKind() == CellUse) {
            SpeculateCellOperand base(this, node->child1());
            GPRReg baseGPR = base.gpr();
            SpeculateCellOperand thisValue(this, node->child2());
            GPRReg thisValueGPR = thisValue.gpr();

            GPRFlushedCallResult result(this);
            GPRReg resultGPR = result.gpr();

            flushRegisters();

            // Empty JumpList: no extra slow cases beyond what the IC adds.
            cachedGetByIdWithThis(node->origin.semantic, baseGPR, thisValueGPR, resultGPR, node->identifierNumber(), JITCompiler::JumpList());

            jsValueResult(resultGPR, node);

        } else {
            JSValueOperand base(this, node->child1());
            GPRReg baseGPR = base.gpr();
            JSValueOperand thisValue(this, node->child2());
            GPRReg thisValueGPR = thisValue.gpr();

            GPRFlushedCallResult result(this);
            GPRReg resultGPR = result.gpr();

            flushRegisters();

            // Either operand being a non-cell routes to the IC's slow path.
            JITCompiler::JumpList notCellList;
            notCellList.append(m_jit.branchIfNotCell(JSValueRegs(baseGPR)));
            notCellList.append(m_jit.branchIfNotCell(JSValueRegs(thisValueGPR)));

            cachedGetByIdWithThis(node->origin.semantic, baseGPR, thisValueGPR, resultGPR, node->identifierNumber(), notCellList);

            jsValueResult(resultGPR, node);
        }

        break;
    }
3703 | |
3704 | case GetArrayLength: |
3705 | compileGetArrayLength(node); |
3706 | break; |
3707 | |
3708 | case DeleteById: { |
3709 | compileDeleteById(node); |
3710 | break; |
3711 | } |
3712 | |
3713 | case DeleteByVal: { |
3714 | compileDeleteByVal(node); |
3715 | break; |
3716 | } |
3717 | |
3718 | case CheckCell: { |
3719 | compileCheckCell(node); |
3720 | break; |
3721 | } |
3722 | |
3723 | case CheckNotEmpty: { |
3724 | compileCheckNotEmpty(node); |
3725 | break; |
3726 | } |
3727 | |
    case AssertNotEmpty: {
        // Debug-only check: when validation is enabled, trap (breakpoint) if
        // the value is the empty JSValue. In release configurations this node
        // emits no code at all.
        if (validationEnabled()) {
            JSValueOperand operand(this, node->child1());
            GPRReg input = operand.gpr();
            auto done = m_jit.branchIfNotEmpty(input);
            m_jit.breakpoint();
            done.link(&m_jit);
        }
        noResult(node);
        break;
    }

    case CheckIdent:
        compileCheckIdent(node);
        break;

    case GetExecutable: {
        compileGetExecutable(node);
        break;
    }
3748 | |
    case CheckStructureOrEmpty: {
        // Like CheckStructure, but the empty value is also allowed to pass.
        SpeculateCellOperand cell(this, node->child1());
        GPRReg cellGPR = cell.gpr();

        // A scratch register is only needed when checking against more than
        // one structure.
        GPRReg tempGPR = InvalidGPRReg;
        Optional<GPRTemporary> temp;
        if (node->structureSet().size() > 1) {
            temp.emplace(this);
            tempGPR = temp->gpr();
        }

        // Only emit the empty-value bypass if the abstract interpreter says
        // the input can actually be empty.
        MacroAssembler::Jump isEmpty;
        if (m_interpreter.forNode(node->child1()).m_type & SpecEmpty)
            isEmpty = m_jit.branchIfEmpty(cellGPR);

        emitStructureCheck(node, cellGPR, tempGPR);

        if (isEmpty.isSet())
            isEmpty.link(&m_jit);

        noResult(node);
        break;
    }

    case CheckStructure: {
        compileCheckStructure(node);
        break;
    }

    case PutStructure: {
        // Performs a structure transition by overwriting the cell's structure
        // ID. The asserts below document that this transition must not change
        // the object's indexing mode or type-info bits.
        RegisteredStructure oldStructure = node->transition()->previous;
        RegisteredStructure newStructure = node->transition()->next;

        // Record the transition so the compilation plan knows about it.
        m_jit.jitCode()->common.notifyCompilingStructureTransition(m_jit.graph().m_plan, m_jit.codeBlock(), node);

        SpeculateCellOperand base(this, node->child1());
        GPRReg baseGPR = base.gpr();

        ASSERT_UNUSED(oldStructure, oldStructure->indexingMode() == newStructure->indexingMode());
        ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());
        ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
        m_jit.store32(MacroAssembler::TrustedImm32(newStructure->id()), MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()));

        noResult(node);
        break;
    }
3795 | |
3796 | case AllocatePropertyStorage: |
3797 | compileAllocatePropertyStorage(node); |
3798 | break; |
3799 | |
3800 | case ReallocatePropertyStorage: |
3801 | compileReallocatePropertyStorage(node); |
3802 | break; |
3803 | |
3804 | case NukeStructureAndSetButterfly: |
3805 | compileNukeStructureAndSetButterfly(node); |
3806 | break; |
3807 | |
3808 | case GetButterfly: |
3809 | compileGetButterfly(node); |
3810 | break; |
3811 | |
3812 | case GetIndexedPropertyStorage: { |
3813 | compileGetIndexedPropertyStorage(node); |
3814 | break; |
3815 | } |
3816 | |
3817 | case ConstantStoragePointer: { |
3818 | compileConstantStoragePointer(node); |
3819 | break; |
3820 | } |
3821 | |
3822 | case GetTypedArrayByteOffset: { |
3823 | compileGetTypedArrayByteOffset(node); |
3824 | break; |
3825 | } |
3826 | |
3827 | case GetPrototypeOf: { |
3828 | compileGetPrototypeOf(node); |
3829 | break; |
3830 | } |
3831 | |
3832 | case GetByOffset: |
3833 | case GetGetterSetterByOffset: { |
3834 | compileGetByOffset(node); |
3835 | break; |
3836 | } |
3837 | |
3838 | case MatchStructure: { |
3839 | compileMatchStructure(node); |
3840 | break; |
3841 | } |
3842 | |
3843 | case GetGetter: { |
3844 | compileGetGetter(node); |
3845 | break; |
3846 | } |
3847 | |
3848 | case GetSetter: { |
3849 | compileGetSetter(node); |
3850 | break; |
3851 | } |
3852 | |
3853 | case PutByOffset: { |
3854 | compilePutByOffset(node); |
3855 | break; |
3856 | } |
3857 | |
3858 | case PutByIdFlush: { |
3859 | compilePutByIdFlush(node); |
3860 | break; |
3861 | } |
3862 | |
3863 | case PutById: { |
3864 | compilePutById(node); |
3865 | break; |
3866 | } |
3867 | |
3868 | case PutByIdWithThis: { |
3869 | compilePutByIdWithThis(node); |
3870 | break; |
3871 | } |
3872 | |
    case PutByValWithThis: {
        // No inline fast path: always calls the runtime, choosing the strict
        // or sloppy operation based on the strictness at the semantic origin.
        JSValueOperand base(this, m_jit.graph().varArgChild(node, 0));
        GPRReg baseGPR = base.gpr();
        JSValueOperand thisValue(this, m_jit.graph().varArgChild(node, 1));
        GPRReg thisValueGPR = thisValue.gpr();
        JSValueOperand property(this, m_jit.graph().varArgChild(node, 2));
        GPRReg propertyGPR = property.gpr();
        JSValueOperand value(this, m_jit.graph().varArgChild(node, 3));
        GPRReg valueGPR = value.gpr();

        flushRegisters();
        callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis, NoResult, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, thisValueGPR, propertyGPR, valueGPR);
        // The put can run arbitrary JS (setters, proxies) and may throw.
        m_jit.exceptionCheck();

        noResult(node);
        break;
    }
3890 | |
3891 | case PutByIdDirect: { |
3892 | compilePutByIdDirect(node); |
3893 | break; |
3894 | } |
3895 | |
3896 | case PutGetterById: |
3897 | case PutSetterById: { |
3898 | compilePutAccessorById(node); |
3899 | break; |
3900 | } |
3901 | |
3902 | case PutGetterSetterById: { |
3903 | compilePutGetterSetterById(node); |
3904 | break; |
3905 | } |
3906 | |
3907 | case PutGetterByVal: |
3908 | case PutSetterByVal: { |
3909 | compilePutAccessorByVal(node); |
3910 | break; |
3911 | } |
3912 | |
3913 | case DefineDataProperty: { |
3914 | compileDefineDataProperty(node); |
3915 | break; |
3916 | } |
3917 | |
3918 | case DefineAccessorProperty: { |
3919 | compileDefineAccessorProperty(node); |
3920 | break; |
3921 | } |
3922 | |
3923 | case GetGlobalLexicalVariable: |
3924 | case GetGlobalVar: { |
3925 | compileGetGlobalVariable(node); |
3926 | break; |
3927 | } |
3928 | |
3929 | case PutGlobalVariable: { |
3930 | compilePutGlobalVariable(node); |
3931 | break; |
3932 | } |
3933 | |
3934 | case PutDynamicVar: { |
3935 | compilePutDynamicVar(node); |
3936 | break; |
3937 | } |
3938 | |
3939 | case GetDynamicVar: { |
3940 | compileGetDynamicVar(node); |
3941 | break; |
3942 | } |
3943 | |
3944 | case ResolveScopeForHoistingFuncDeclInEval: { |
3945 | compileResolveScopeForHoistingFuncDeclInEval(node); |
3946 | break; |
3947 | } |
3948 | |
3949 | case ResolveScope: { |
3950 | compileResolveScope(node); |
3951 | break; |
3952 | } |
3953 | |
3954 | case NotifyWrite: { |
3955 | compileNotifyWrite(node); |
3956 | break; |
3957 | } |
3958 | |
3959 | case CheckTypeInfoFlags: { |
3960 | compileCheckTypeInfoFlags(node); |
3961 | break; |
3962 | } |
3963 | |
3964 | case ParseInt: { |
3965 | compileParseInt(node); |
3966 | break; |
3967 | } |
3968 | |
3969 | case OverridesHasInstance: { |
3970 | compileOverridesHasInstance(node); |
3971 | break; |
3972 | } |
3973 | |
3974 | case InstanceOf: { |
3975 | compileInstanceOf(node); |
3976 | break; |
3977 | } |
3978 | |
3979 | case InstanceOfCustom: { |
3980 | compileInstanceOfCustom(node); |
3981 | break; |
3982 | } |
3983 | |
    case IsEmpty: {
        // True iff the raw bits equal the empty JSValue. or32 with ValueFalse
        // turns the 0/1 comparison result into a boxed JS boolean.
        JSValueOperand value(this, node->child1());
        GPRTemporary result(this, Reuse, value);

        m_jit.comparePtr(JITCompiler::Equal, value.gpr(), TrustedImm32(JSValue::encode(JSValue())), result.gpr());
        m_jit.or32(TrustedImm32(JSValue::ValueFalse), result.gpr());

        jsValueResult(result.gpr(), node, DataFormatJSBoolean);
        break;
    }

    case IsUndefined: {
        JSValueOperand value(this, node->child1());
        GPRTemporary result(this);
        GPRTemporary localGlobalObject(this);
        GPRTemporary remoteGlobalObject(this);
        GPRTemporary scratch(this);

        JITCompiler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());

        // Non-cell: a direct bit comparison against ValueUndefined suffices.
        m_jit.compare64(JITCompiler::Equal, value.gpr(), TrustedImm32(JSValue::ValueUndefined), result.gpr());
        JITCompiler::Jump done = m_jit.jump();

        isCell.link(&m_jit);
        JITCompiler::Jump notMasqueradesAsUndefined;
        if (masqueradesAsUndefinedWatchpointIsStillValid()) {
            // No object in this global can masquerade as undefined, so any
            // cell answers false.
            m_jit.move(TrustedImm32(0), result.gpr());
            notMasqueradesAsUndefined = m_jit.jump();
        } else {
            // A MasqueradesAsUndefined object only reads as undefined when
            // observed from its own global object; compare the two.
            JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(
                JITCompiler::NonZero,
                JITCompiler::Address(value.gpr(), JSCell::typeInfoFlagsOffset()),
                TrustedImm32(MasqueradesAsUndefined));
            m_jit.move(TrustedImm32(0), result.gpr());
            notMasqueradesAsUndefined = m_jit.jump();

            isMasqueradesAsUndefined.link(&m_jit);
            GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
            GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
            m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.globalObjectFor(node->origin.semantic)), localGlobalObjectGPR);
            m_jit.emitLoadStructure(vm(), value.gpr(), result.gpr(), scratch.gpr());
            m_jit.loadPtr(JITCompiler::Address(result.gpr(), Structure::globalObjectOffset()), remoteGlobalObjectGPR);
            m_jit.comparePtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, result.gpr());
        }

        notMasqueradesAsUndefined.link(&m_jit);
        done.link(&m_jit);
        // Box the 0/1 result as a JS boolean.
        m_jit.or32(TrustedImm32(JSValue::ValueFalse), result.gpr());
        jsValueResult(result.gpr(), node, DataFormatJSBoolean);
        break;
    }

    case IsUndefinedOrNull: {
        JSValueOperand value(this, node->child1());
        GPRTemporary result(this, Reuse, value);

        GPRReg valueGPR = value.gpr();
        GPRReg resultGPR = result.gpr();

        // Masking off the undefined tag bit maps ValueUndefined onto
        // ValueNull, so one compare covers both undefined and null.
        m_jit.move(valueGPR, resultGPR);
        m_jit.and64(CCallHelpers::TrustedImm32(~JSValue::UndefinedTag), resultGPR);
        m_jit.compare64(CCallHelpers::Equal, resultGPR, CCallHelpers::TrustedImm32(JSValue::ValueNull), resultGPR);

        unblessedBooleanResult(resultGPR, node);
        break;
    }

    case IsBoolean: {
        JSValueOperand value(this, node->child1());
        GPRTemporary result(this, Reuse, value);

        // After XOR with ValueFalse, a boolean leaves at most the low bit
        // set; test64 produces 0/1 and or32 boxes it as a JS boolean.
        m_jit.move(value.gpr(), result.gpr());
        m_jit.xor64(JITCompiler::TrustedImm32(JSValue::ValueFalse), result.gpr());
        m_jit.test64(JITCompiler::Zero, result.gpr(), JITCompiler::TrustedImm32(static_cast<int32_t>(~1)), result.gpr());
        m_jit.or32(TrustedImm32(JSValue::ValueFalse), result.gpr());
        jsValueResult(result.gpr(), node, DataFormatJSBoolean);
        break;
    }

    case IsNumber: {
        JSValueOperand value(this, node->child1());
        GPRTemporary result(this, Reuse, value);

        // In the JSVALUE64 encoding, a value is a number iff it has any of
        // the number-tag bits set.
        m_jit.test64(JITCompiler::NonZero, value.gpr(), GPRInfo::numberTagRegister, result.gpr());
        m_jit.or32(TrustedImm32(JSValue::ValueFalse), result.gpr());
        jsValueResult(result.gpr(), node, DataFormatJSBoolean);
        break;
    }

    case NumberIsInteger: {
        // Number.isInteger: true for any int32; for doubles, true iff the
        // value is finite and equal to its truncation; false for non-numbers.
        JSValueOperand value(this, node->child1());
        GPRTemporary result(this, Reuse, value);

        FPRTemporary temp1(this);
        FPRTemporary temp2(this);

        JSValueRegs valueRegs = JSValueRegs(value.gpr());
        GPRReg resultGPR = result.gpr();

        FPRReg tempFPR1 = temp1.fpr();
        FPRReg tempFPR2 = temp2.fpr();

        MacroAssembler::JumpList done;

        auto isInt32 = m_jit.branchIfInt32(valueRegs);
        auto notNumber = m_jit.branchIfNotDoubleKnownNotInt32(valueRegs);

        // We're a double here.
        m_jit.unboxDouble(valueRegs.gpr(), resultGPR, tempFPR1);
        // Extract the IEEE 754 exponent (bits 52..62); an exponent of 0x7ff
        // means NaN or +/-Infinity, neither of which is an integer.
        m_jit.urshift64(TrustedImm32(52), resultGPR);
        m_jit.and32(TrustedImm32(0x7ff), resultGPR);
        auto notNanNorInfinity = m_jit.branch32(JITCompiler::NotEqual, TrustedImm32(0x7ff), resultGPR);
        m_jit.move(TrustedImm32(JSValue::ValueFalse), resultGPR);
        done.append(m_jit.jump());

        notNanNorInfinity.link(&m_jit);
        // Finite double: it is an integer iff truncation leaves it unchanged.
        m_jit.roundTowardZeroDouble(tempFPR1, tempFPR2);
        m_jit.compareDouble(JITCompiler::DoubleEqual, tempFPR1, tempFPR2, resultGPR);
        m_jit.or32(TrustedImm32(JSValue::ValueFalse), resultGPR);
        done.append(m_jit.jump());

        isInt32.link(&m_jit);
        m_jit.move(TrustedImm32(JSValue::ValueTrue), resultGPR);
        done.append(m_jit.jump());

        notNumber.link(&m_jit);
        m_jit.move(TrustedImm32(JSValue::ValueFalse), resultGPR);

        done.link(&m_jit);
        jsValueResult(resultGPR, node, DataFormatJSBoolean);
        break;
    }
4116 | |
    case MapHash: {
        // Computes the Map/Set hash of a value. Non-string values hash their
        // raw 64 bits via wangsInt64Hash; strings use the hash cached in the
        // StringImpl, falling back to operationMapHash when the string is a
        // rope or its hash has not been computed yet (extracted hash == 0).
        switch (node->child1().useKind()) {
        case BooleanUse:
        case Int32Use:
        case SymbolUse:
        case ObjectUse: {
            // These use kinds are never strings, so the bit hash is always valid.
            JSValueOperand input(this, node->child1(), ManualOperandSpeculation);
            GPRTemporary result(this, Reuse, input);
            GPRTemporary temp(this);

            GPRReg inputGPR = input.gpr();
            GPRReg resultGPR = result.gpr();
            GPRReg tempGPR = temp.gpr();

            speculate(node, node->child1());

            m_jit.move(inputGPR, resultGPR);
            m_jit.wangsInt64Hash(resultGPR, tempGPR);
            int32Result(resultGPR, node);
            break;
        }
        case CellUse:
        case StringUse: {
            SpeculateCellOperand input(this, node->child1());
            GPRTemporary result(this);
            Optional<GPRTemporary> temp;

            // Only the CellUse path needs a scratch (for the non-string bit hash).
            GPRReg tempGPR = InvalidGPRReg;
            if (node->child1().useKind() == CellUse) {
                temp.emplace(this);
                tempGPR = temp->gpr();
            }

            GPRReg inputGPR = input.gpr();
            GPRReg resultGPR = result.gpr();

            MacroAssembler::JumpList slowPath;
            MacroAssembler::JumpList done;

            if (node->child1().useKind() == StringUse)
                speculateString(node->child1(), inputGPR);
            else {
                // Generic cell: non-strings take the bit hash, strings fall
                // through to the cached-hash load below.
                auto isString = m_jit.branchIfString(inputGPR);
                m_jit.move(inputGPR, resultGPR);
                m_jit.wangsInt64Hash(resultGPR, tempGPR);
                done.append(m_jit.jump());
                isString.link(&m_jit);
            }

            // Load the StringImpl and pull the hash out of the upper bits of
            // its packed hash-and-flags word; ropes and a zero (not yet
            // computed) hash go to the slow path.
            m_jit.loadPtr(MacroAssembler::Address(inputGPR, JSString::offsetOfValue()), resultGPR);
            slowPath.append(m_jit.branchIfRopeStringImpl(resultGPR));
            m_jit.load32(MacroAssembler::Address(resultGPR, StringImpl::flagsOffset()), resultGPR);
            m_jit.urshift32(MacroAssembler::TrustedImm32(StringImpl::s_flagCount), resultGPR);
            slowPath.append(m_jit.branchTest32(MacroAssembler::Zero, resultGPR));
            done.append(m_jit.jump());

            slowPath.link(&m_jit);
            silentSpillAllRegisters(resultGPR);
            callOperation(operationMapHash, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), JSValueRegs(inputGPR));
            silentFillAllRegisters();
            m_jit.exceptionCheck();

            done.link(&m_jit);
            int32Result(resultGPR, node);
            break;
        }
        default:
            RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
            break;
        }

        // The typed paths above produced the result and broke out of the
        // inner switch; only UntypedUse continues below.
        if (node->child1().useKind() != UntypedUse)
            break;

        JSValueOperand input(this, node->child1());
        GPRTemporary temp(this);
        GPRTemporary result(this);

        GPRReg inputGPR = input.gpr();
        GPRReg resultGPR = result.gpr();
        GPRReg tempGPR = temp.gpr();

        // Untyped: anything that is not a non-rope, already-hashed string is
        // hashed straight from its bits; only strings can hit the slow path.
        MacroAssembler::JumpList straightHash;
        MacroAssembler::JumpList done;
        straightHash.append(m_jit.branchIfNotCell(inputGPR));
        MacroAssembler::JumpList slowPath;
        straightHash.append(m_jit.branchIfNotString(inputGPR));
        m_jit.loadPtr(MacroAssembler::Address(inputGPR, JSString::offsetOfValue()), resultGPR);
        slowPath.append(m_jit.branchIfRopeStringImpl(resultGPR));
        m_jit.load32(MacroAssembler::Address(resultGPR, StringImpl::flagsOffset()), resultGPR);
        m_jit.urshift32(MacroAssembler::TrustedImm32(StringImpl::s_flagCount), resultGPR);
        slowPath.append(m_jit.branchTest32(MacroAssembler::Zero, resultGPR));
        done.append(m_jit.jump());

        straightHash.link(&m_jit);
        m_jit.move(inputGPR, resultGPR);
        m_jit.wangsInt64Hash(resultGPR, tempGPR);
        done.append(m_jit.jump());

        slowPath.link(&m_jit);
        silentSpillAllRegisters(resultGPR);
        callOperation(operationMapHash, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), JSValueRegs(inputGPR));
        silentFillAllRegisters();
        m_jit.exceptionCheck();

        done.link(&m_jit);
        int32Result(resultGPR, node);
        break;
    }

    case NormalizeMapKey: {
        compileNormalizeMapKey(node);
        break;
    }
4231 | |
case GetMapBucket: {
    // Inline probe of the JSMap/JSSet backing hash table (HashMapImpl) for the
    // bucket holding |key|, given its precomputed |hash|. Uses linear probing
    // with the index wrapped by (capacity - 1); capacity is presumably a power
    // of two for the mask to be valid — TODO confirm against HashMapImpl.
    SpeculateCellOperand map(this, node->child1());
    JSValueOperand key(this, node->child2(), ManualOperandSpeculation);
    SpeculateInt32Operand hash(this, node->child3());
    GPRTemporary mask(this);
    GPRTemporary index(this);
    GPRTemporary buffer(this);
    GPRTemporary bucket(this);
    GPRTemporary result(this);

    GPRReg hashGPR = hash.gpr();
    GPRReg mapGPR = map.gpr();
    GPRReg maskGPR = mask.gpr();
    GPRReg indexGPR = index.gpr();
    GPRReg bufferGPR = buffer.gpr();
    GPRReg bucketGPR = bucket.gpr();
    GPRReg keyGPR = key.gpr();
    GPRReg resultGPR = result.gpr();

    // child1 must be a JSMap or a JSSet; both share the HashMapImpl layout.
    if (node->child1().useKind() == MapObjectUse)
        speculateMapObject(node->child1(), mapGPR);
    else if (node->child1().useKind() == SetObjectUse)
        speculateSetObject(node->child1(), mapGPR);
    else
        RELEASE_ASSERT_NOT_REACHED();

    if (node->child2().useKind() != UntypedUse)
        speculate(node, node->child2());

    // mask = capacity - 1; index starts at the raw hash and is masked each trip.
    m_jit.load32(MacroAssembler::Address(mapGPR, HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::offsetOfCapacity()), maskGPR);
    m_jit.loadPtr(MacroAssembler::Address(mapGPR, HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::offsetOfBuffer()), bufferGPR);
    m_jit.sub32(TrustedImm32(1), maskGPR);
    m_jit.move(hashGPR, indexGPR);

    MacroAssembler::Label loop = m_jit.label();
    MacroAssembler::JumpList done;
    MacroAssembler::JumpList slowPathCases;
    MacroAssembler::JumpList loopAround;

    // Load the bucket pointer at buffer[index] (pointer-sized entries; this is
    // the 64-bit code path, hence TimesEight).
    m_jit.and32(maskGPR, indexGPR);
    m_jit.loadPtr(MacroAssembler::BaseIndex(bufferGPR, indexGPR, MacroAssembler::TimesEight), bucketGPR);
    m_jit.move(bucketGPR, resultGPR);
    // An empty slot ends the probe: the key is not in the table.
    auto notPresentInTable = m_jit.branchPtr(MacroAssembler::Equal,
        bucketGPR, TrustedImmPtr(bitwise_cast<size_t>(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::emptyValue())));
    // A tombstone (deleted) slot does not end the probe; keep scanning.
    loopAround.append(m_jit.branchPtr(MacroAssembler::Equal,
        bucketGPR, TrustedImmPtr(bitwise_cast<size_t>(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::deletedValue()))));

    // bucketGPR now holds the candidate bucket's key (a boxed JSValue).
    m_jit.load64(MacroAssembler::Address(bucketGPR, HashMapBucket<HashMapBucketDataKey>::offsetOfKey()), bucketGPR);

    // Perform Object.is()
    switch (node->child2().useKind()) {
    case BooleanUse:
    case Int32Use:
    case SymbolUse:
    case ObjectUse: {
        // For these types, bitwise equality of the boxed values is exact.
        done.append(m_jit.branch64(MacroAssembler::Equal, bucketGPR, keyGPR)); // They're definitely the same value, we found the bucket we were looking for!
        // Otherwise, loop around.
        break;
    }
    case CellUse: {
        done.append(m_jit.branch64(MacroAssembler::Equal, bucketGPR, keyGPR));
        loopAround.append(m_jit.branchIfNotCell(JSValueRegs(bucketGPR)));
        loopAround.append(m_jit.branchIfNotString(bucketGPR));
        loopAround.append(m_jit.branchIfNotString(keyGPR));
        // They're both strings. Equal string contents can live in distinct
        // JSString cells, so defer to the runtime for content comparison.
        slowPathCases.append(m_jit.jump());
        break;
    }
    case StringUse: {
        done.append(m_jit.branch64(MacroAssembler::Equal, bucketGPR, keyGPR)); // They're definitely the same value, we found the bucket we were looking for!
        loopAround.append(m_jit.branchIfNotCell(JSValueRegs(bucketGPR)));
        loopAround.append(m_jit.branchIfNotString(bucketGPR));
        slowPathCases.append(m_jit.jump());
        break;
    }
    case UntypedUse: {
        done.append(m_jit.branch64(MacroAssembler::Equal, bucketGPR, keyGPR)); // They're definitely the same value, we found the bucket we were looking for!
        // The input key and bucket's key are already normalized. So if 64-bit compare fails and one is not a cell, they're definitely not equal.
        loopAround.append(m_jit.branchIfNotCell(JSValueRegs(bucketGPR)));
        // first is a cell here.
        loopAround.append(m_jit.branchIfNotCell(JSValueRegs(keyGPR)));
        // Both are cells here.
        loopAround.append(m_jit.branchIfNotString(bucketGPR));
        // The first is a string here.
        slowPathCases.append(m_jit.branchIfString(keyGPR));
        // The first is a string, but the second is not, we continue to loop around.
        loopAround.append(m_jit.jump());
        break;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }

    // Linear probing: advance to the next slot and retry (index is re-masked
    // at the top of the loop).
    if (!loopAround.empty())
        loopAround.link(&m_jit);

    m_jit.add32(TrustedImm32(1), indexGPR);
    m_jit.jump().linkTo(loop, &m_jit);

    // Slow path: string-keyed comparison needs full SameValueZero semantics;
    // let the runtime finish the lookup from scratch.
    if (!slowPathCases.empty()) {
        slowPathCases.link(&m_jit);
        silentSpillAllRegisters(indexGPR);
        if (node->child1().useKind() == MapObjectUse)
            callOperation(operationJSMapFindBucket, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), mapGPR, keyGPR, hashGPR);
        else
            callOperation(operationJSSetFindBucket, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), mapGPR, keyGPR, hashGPR);
        silentFillAllRegisters();
        m_jit.exceptionCheck();
        done.append(m_jit.jump());
    }

    // Not found: produce the VM's shared sentinel bucket rather than null, so
    // downstream bucket loads have a valid cell to read from.
    notPresentInTable.link(&m_jit);
    if (node->child1().useKind() == MapObjectUse)
        m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), vm().sentinelMapBucket()), resultGPR);
    else
        m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), vm().sentinelSetBucket()), resultGPR);
    done.link(&m_jit);
    cellResult(resultGPR, node);
    break;
}
4353 | |
// The opcodes below are each compiled by a dedicated compileXXX() helper
// shared with the 32-bit backend; this switch only dispatches.
case GetMapBucketHead:
    compileGetMapBucketHead(node);
    break;

case GetMapBucketNext:
    compileGetMapBucketNext(node);
    break;

case LoadKeyFromMapBucket:
    compileLoadKeyFromMapBucket(node);
    break;

case LoadValueFromMapBucket:
    compileLoadValueFromMapBucket(node);
    break;

case ExtractValueFromWeakMapGet:
    compileExtractValueFromWeakMapGet(node);
    break;

case SetAdd:
    compileSetAdd(node);
    break;

case MapSet:
    compileMapSet(node);
    break;

case WeakMapGet:
    compileWeakMapGet(node);
    break;

case WeakSetAdd:
    compileWeakSetAdd(node);
    break;

case WeakMapSet:
    compileWeakMapSet(node);
    break;

case StringSlice: {
    compileStringSlice(node);
    break;
}

case ToLowerCase: {
    compileToLowerCase(node);
    break;
}

case NumberToStringWithRadix: {
    compileNumberToStringWithRadix(node);
    break;
}

case NumberToStringWithValidRadixConstant: {
    compileNumberToStringWithValidRadixConstant(node);
    break;
}

case IsObject: {
    compileIsObject(node);
    break;
}

case IsObjectOrNull: {
    compileIsObjectOrNull(node);
    break;
}

case IsFunction: {
    compileIsFunction(node);
    break;
}

case IsCellWithType: {
    compileIsCellWithType(node);
    break;
}

case IsTypedArrayView: {
    compileIsTypedArrayView(node);
    break;
}

case TypeOf: {
    compileTypeOf(node);
    break;
}

// Flush has no codegen; it only constrains register allocation / liveness.
case Flush:
    break;

// All call-shaped opcodes funnel into emitCall(), which handles the
// direct/varargs/tail-call variants internally.
case Call:
case TailCall:
case TailCallInlinedCaller:
case Construct:
case CallVarargs:
case TailCallVarargs:
case TailCallVarargsInlinedCaller:
case CallForwardVarargs:
case ConstructVarargs:
case ConstructForwardVarargs:
case TailCallForwardVarargs:
case TailCallForwardVarargsInlinedCaller:
case CallEval:
case DirectCall:
case DirectConstruct:
case DirectTailCall:
case DirectTailCallInlinedCaller:
    emitCall(node);
    break;

case LoadVarargs: {
    compileLoadVarargs(node);
    break;
}

case ForwardVarargs: {
    compileForwardVarargs(node);
    break;
}

case CreateActivation: {
    compileCreateActivation(node);
    break;
}

case PushWithScope: {
    compilePushWithScope(node);
    break;
}

case CreateDirectArguments: {
    compileCreateDirectArguments(node);
    break;
}

case GetFromArguments: {
    compileGetFromArguments(node);
    break;
}

case PutToArguments: {
    compilePutToArguments(node);
    break;
}

case GetArgument: {
    compileGetArgument(node);
    break;
}

case CreateScopedArguments: {
    compileCreateScopedArguments(node);
    break;
}

case CreateClonedArguments: {
    compileCreateClonedArguments(node);
    break;
}
case CreateRest: {
    compileCreateRest(node);
    break;
}

case NewFunction:
case NewGeneratorFunction:
case NewAsyncGeneratorFunction:
case NewAsyncFunction:
    compileNewFunction(node);
    break;

case SetFunctionName:
    compileSetFunctionName(node);
    break;

case InById:
    compileInById(node);
    break;

case InByVal:
    compileInByVal(node);
    break;
4539 | |
case HasOwnProperty: {
    // Fast path for Object.prototype.hasOwnProperty: probe the VM-wide
    // HasOwnPropertyCache keyed on (StructureID, UniquedStringImpl*). On a
    // cache hit, the cached boolean answer is returned without a lookup; on
    // any miss or non-uniqued key, fall back to operationHasOwnProperty.
    SpeculateCellOperand object(this, node->child1());
    GPRTemporary uniquedStringImpl(this);
    GPRTemporary temp(this);
    GPRTemporary hash(this);
    GPRTemporary structureID(this);
    GPRTemporary result(this);

    // The key is either an untyped JSValue or a known cell (String/Symbol);
    // exactly one of these operands is materialized.
    Optional<SpeculateCellOperand> keyAsCell;
    Optional<JSValueOperand> keyAsValue;
    GPRReg keyGPR;
    if (node->child2().useKind() == UntypedUse) {
        keyAsValue.emplace(this, node->child2());
        keyGPR = keyAsValue->gpr();
    } else {
        ASSERT(node->child2().useKind() == StringUse || node->child2().useKind() == SymbolUse);
        keyAsCell.emplace(this, node->child2());
        keyGPR = keyAsCell->gpr();
    }

    GPRReg objectGPR = object.gpr();
    GPRReg implGPR = uniquedStringImpl.gpr();
    GPRReg tempGPR = temp.gpr();
    GPRReg hashGPR = hash.gpr();
    GPRReg structureIDGPR = structureID.gpr();
    GPRReg resultGPR = result.gpr();

    speculateObject(node->child1());

    // Extract the key's UniquedStringImpl* into implGPR. Rope strings and
    // non-atom strings have no uniqued impl, so they go to the slow path.
    MacroAssembler::JumpList slowPath;
    switch (node->child2().useKind()) {
    case SymbolUse: {
        speculateSymbol(node->child2(), keyGPR);
        m_jit.loadPtr(MacroAssembler::Address(keyGPR, Symbol::offsetOfSymbolImpl()), implGPR);
        break;
    }
    case StringUse: {
        speculateString(node->child2(), keyGPR);
        m_jit.loadPtr(MacroAssembler::Address(keyGPR, JSString::offsetOfValue()), implGPR);
        slowPath.append(m_jit.branchIfRopeStringImpl(implGPR));
        slowPath.append(m_jit.branchTest32(
            MacroAssembler::Zero, MacroAssembler::Address(implGPR, StringImpl::flagsOffset()),
            MacroAssembler::TrustedImm32(StringImpl::flagIsAtom())));
        break;
    }
    case UntypedUse: {
        // Untyped keys must be a String or Symbol cell to use the cache.
        slowPath.append(m_jit.branchIfNotCell(JSValueRegs(keyGPR)));
        auto isNotString = m_jit.branchIfNotString(keyGPR);
        m_jit.loadPtr(MacroAssembler::Address(keyGPR, JSString::offsetOfValue()), implGPR);
        slowPath.append(m_jit.branchIfRopeStringImpl(implGPR));
        slowPath.append(m_jit.branchTest32(
            MacroAssembler::Zero, MacroAssembler::Address(implGPR, StringImpl::flagsOffset()),
            MacroAssembler::TrustedImm32(StringImpl::flagIsAtom())));
        auto hasUniquedImpl = m_jit.jump();

        isNotString.link(&m_jit);
        slowPath.append(m_jit.branchIfNotSymbol(keyGPR));
        m_jit.loadPtr(MacroAssembler::Address(keyGPR, Symbol::offsetOfSymbolImpl()), implGPR);

        hasUniquedImpl.link(&m_jit);
        break;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }

    // Note that we don't test if the hash is zero here. AtomStringImpl's can't have a zero
    // hash, however, a SymbolImpl may. But, because this is a cache, we don't care. We only
    // ever load the result from the cache if the cache entry matches what we are querying for.
    // So we either get super lucky and use zero for the hash and somehow collide with the entity
    // we're looking for, or we realize we're comparing against another entity, and go to the
    // slow path anyways.
    // Cache index = ((stringHash + structureID) & mask) * sizeof(Entry).
    m_jit.load32(MacroAssembler::Address(implGPR, UniquedStringImpl::flagsOffset()), hashGPR);
    m_jit.urshift32(MacroAssembler::TrustedImm32(StringImpl::s_flagCount), hashGPR);
    m_jit.load32(MacroAssembler::Address(objectGPR, JSCell::structureIDOffset()), structureIDGPR);
    m_jit.add32(structureIDGPR, hashGPR);
    m_jit.and32(TrustedImm32(HasOwnPropertyCache::mask), hashGPR);
    if (hasOneBitSet(sizeof(HasOwnPropertyCache::Entry))) // is a power of 2
        m_jit.lshift32(TrustedImm32(getLSBSet(sizeof(HasOwnPropertyCache::Entry))), hashGPR);
    else
        m_jit.mul32(TrustedImm32(sizeof(HasOwnPropertyCache::Entry)), hashGPR, hashGPR);
    ASSERT(vm().hasOwnPropertyCache());
    m_jit.move(TrustedImmPtr(vm().hasOwnPropertyCache()), tempGPR);
    // A hit requires both the impl pointer and the structure ID to match.
    slowPath.append(m_jit.branchPtr(MacroAssembler::NotEqual,
        MacroAssembler::BaseIndex(tempGPR, hashGPR, MacroAssembler::TimesOne, HasOwnPropertyCache::Entry::offsetOfImpl()), implGPR));
    m_jit.load8(MacroAssembler::BaseIndex(tempGPR, hashGPR, MacroAssembler::TimesOne, HasOwnPropertyCache::Entry::offsetOfResult()), resultGPR);
    m_jit.load32(MacroAssembler::BaseIndex(tempGPR, hashGPR, MacroAssembler::TimesOne, HasOwnPropertyCache::Entry::offsetOfStructureID()), tempGPR);
    slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, tempGPR, structureIDGPR));
    auto done = m_jit.jump();

    slowPath.link(&m_jit);
    silentSpillAllRegisters(resultGPR);
    callOperation(operationHasOwnProperty, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), objectGPR, keyGPR);
    silentFillAllRegisters();
    m_jit.exceptionCheck();

    // Both paths leave 0/1 in resultGPR; box it as a JS boolean.
    done.link(&m_jit);
    m_jit.or32(TrustedImm32(JSValue::ValueFalse), resultGPR);
    jsValueResult(resultGPR, node, DataFormatJSBoolean);
    break;
}
4641 | |
// Bump this node's execution counter (used by profiling).
case CountExecution:
    m_jit.add64(TrustedImm32(1), MacroAssembler::AbsoluteAddress(node->executionCounter()->address()));
    break;

// Enter/leave a super-sampler region by adjusting the global counter.
case SuperSamplerBegin:
    m_jit.add32(TrustedImm32(1), MacroAssembler::AbsoluteAddress(bitwise_cast<void*>(&g_superSamplerCount)));
    break;

case SuperSamplerEnd:
    m_jit.sub32(TrustedImm32(1), MacroAssembler::AbsoluteAddress(bitwise_cast<void*>(&g_superSamplerCount)));
    break;

// Unconditionally OSR-exits; code after this point in the block is dead.
case ForceOSRExit: {
    terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
    break;
}

case InvalidationPoint:
    emitInvalidationPoint(node);
    break;

case CheckTraps:
    compileCheckTraps(node);
    break;

// Pure speculation nodes: emit the type checks for each child, no result.
case Phantom:
case Check:
case CheckVarargs:
    DFG_NODE_DO_TO_CHILDREN(m_jit.graph(), node, speculate);
    noResult(node);
    break;

case PhantomLocal:
case LoopHint:
    // This is a no-op.
    noResult(node);
    break;

case Unreachable:
    unreachable(node);
    break;

case StoreBarrier:
case FencedStoreBarrier: {
    compileStoreBarrier(node);
    break;
}

// For-in enumeration support opcodes, all delegated to shared helpers.
case GetEnumerableLength: {
    compileGetEnumerableLength(node);
    break;
}
case HasGenericProperty: {
    compileHasGenericProperty(node);
    break;
}
case HasStructureProperty: {
    compileHasStructureProperty(node);
    break;
}
case HasIndexedProperty: {
    compileHasIndexedProperty(node);
    break;
}
case GetDirectPname: {
    compileGetDirectPname(node);
    break;
}
case GetPropertyEnumerator: {
    compileGetPropertyEnumerator(node);
    break;
}
case GetEnumeratorStructurePname:
case GetEnumeratorGenericPname: {
    compileGetEnumeratorPname(node);
    break;
}
case ToIndexString: {
    compileToIndexString(node);
    break;
}
case ProfileType: {
    compileProfileType(node);
    break;
}
// Record that this basic block executed (for type-profiler control flow).
case ProfileControlFlow: {
    BasicBlockLocation* basicBlockLocation = node->basicBlockLocation();
    basicBlockLocation->emitExecuteCode(m_jit);
    noResult(node);
    break;
}

case LogShadowChickenPrologue: {
    compileLogShadowChickenPrologue(node);
    break;
}

case LogShadowChickenTail: {
    compileLogShadowChickenTail(node);
    break;
}

case MaterializeNewObject:
    compileMaterializeNewObject(node);
    break;

case CallDOM:
    compileCallDOM(node);
    break;

case CallDOMGetter:
    compileCallDOMGetter(node);
    break;

case CheckSubClass:
    compileCheckSubClass(node);
    break;

case ExtractCatchLocal: {
    compileExtractCatchLocal(node);
    break;
}

case ClearCatchLocals:
    compileClearCatchLocals(node);
    break;
4768 | |
// Inline DataView.prototype.get{Int,Uint,Float}* : bounds-check the access,
// then load byteSize bytes at the given offset, byte-swapping when the
// effective endianness is big-endian. child3, when present, is the runtime
// littleEndian argument (data.isLittleEndian == MixedTriState).
case DataViewGetFloat:
case DataViewGetInt: {
    SpeculateCellOperand dataView(this, node->child1());
    GPRReg dataViewGPR = dataView.gpr();
    speculateDataViewObject(node->child1(), dataViewGPR);

    SpeculateInt32Operand index(this, node->child2());
    GPRReg indexGPR = index.gpr();

    GPRTemporary temp1(this);
    GPRReg t1 = temp1.gpr();
    GPRTemporary temp2(this);
    GPRReg t2 = temp2.gpr();

    Optional<SpeculateBooleanOperand> isLittleEndianOperand;
    if (node->child3())
        isLittleEndianOperand.emplace(this, node->child3());
    GPRReg isLittleEndianGPR = isLittleEndianOperand ? isLittleEndianOperand->gpr() : InvalidGPRReg;

    DataViewData data = node->dataViewData();

    // Bounds check: (zero-extended index) + (byteSize - 1) < length, compared
    // at 64 bits so the add cannot wrap.
    m_jit.zeroExtend32ToPtr(indexGPR, t2);
    if (data.byteSize > 1)
        m_jit.add64(TrustedImm32(data.byteSize - 1), t2);
    m_jit.load32(MacroAssembler::Address(dataViewGPR, JSArrayBufferView::offsetOfLength()), t1);
    speculationCheck(OutOfBounds, JSValueRegs(), node,
        m_jit.branch64(MacroAssembler::AboveOrEqual, t2, t1));

    m_jit.loadPtr(JITCompiler::Address(dataViewGPR, JSArrayBufferView::offsetOfVector()), t2);
    cageTypedArrayStorage(dataViewGPR, t2);

    m_jit.zeroExtend32ToPtr(indexGPR, t1);
    auto baseIndex = JITCompiler::BaseIndex(t2, t1, MacroAssembler::TimesOne);

    if (node->op() == DataViewGetInt) {
        switch (data.byteSize) {
        case 1:
            // Single bytes have no endianness to worry about.
            if (data.isSigned)
                m_jit.load8SignedExtendTo32(baseIndex, t2);
            else
                m_jit.load8(baseIndex, t2);
            int32Result(t2, node);
            break;
        case 2: {
            auto emitLittleEndianLoad = [&] {
                if (data.isSigned)
                    m_jit.load16SignedExtendTo32(baseIndex, t2);
                else
                    m_jit.load16(baseIndex, t2);
            };
            auto emitBigEndianLoad = [&] {
                // Swap first, then sign-extend the swapped 16-bit value.
                m_jit.load16(baseIndex, t2);
                m_jit.byteSwap16(t2);
                if (data.isSigned)
                    m_jit.signExtend16To32(t2, t2);
            };

            // Endianness known at compile time (True/False) emits one path;
            // MixedTriState branches on the runtime flag.
            if (data.isLittleEndian == FalseTriState)
                emitBigEndianLoad();
            else if (data.isLittleEndian == TrueTriState)
                emitLittleEndianLoad();
            else {
                RELEASE_ASSERT(isLittleEndianGPR != InvalidGPRReg);
                auto isBigEndian = m_jit.branchTest32(MacroAssembler::Zero, isLittleEndianGPR, TrustedImm32(1));
                emitLittleEndianLoad();
                auto done = m_jit.jump();
                isBigEndian.link(&m_jit);
                emitBigEndianLoad();
                done.link(&m_jit);
            }
            int32Result(t2, node);
            break;
        }
        case 4: {
            m_jit.load32(baseIndex, t2);

            if (data.isLittleEndian == FalseTriState)
                m_jit.byteSwap32(t2);
            else if (data.isLittleEndian == MixedTriState) {
                RELEASE_ASSERT(isLittleEndianGPR != InvalidGPRReg);
                auto isLittleEndian = m_jit.branchTest32(MacroAssembler::NonZero, isLittleEndianGPR, TrustedImm32(1));
                m_jit.byteSwap32(t2);
                isLittleEndian.link(&m_jit);
            }

            // getUint32 can exceed int32 range, so the unsigned case is
            // produced as a strict Int52 rather than an Int32.
            if (data.isSigned)
                int32Result(t2, node);
            else
                strictInt52Result(t2, node);
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    } else {
        FPRTemporary result(this);
        FPRReg resultFPR = result.fpr();

        switch (data.byteSize) {
        case 4: {
            // getFloat32: load as float, widen to double for the JS number.
            auto emitLittleEndianCode = [&] {
                m_jit.loadFloat(baseIndex, resultFPR);
                m_jit.convertFloatToDouble(resultFPR, resultFPR);
            };

            auto emitBigEndianCode = [&] {
                // Swap in an integer register, then reinterpret as float.
                m_jit.load32(baseIndex, t2);
                m_jit.byteSwap32(t2);
                m_jit.move32ToFloat(t2, resultFPR);
                m_jit.convertFloatToDouble(resultFPR, resultFPR);
            };

            if (data.isLittleEndian == TrueTriState)
                emitLittleEndianCode();
            else if (data.isLittleEndian == FalseTriState)
                emitBigEndianCode();
            else {
                RELEASE_ASSERT(isLittleEndianGPR != InvalidGPRReg);
                auto isBigEndian = m_jit.branchTest32(MacroAssembler::Zero, isLittleEndianGPR, TrustedImm32(1));
                emitLittleEndianCode();
                auto done = m_jit.jump();
                isBigEndian.link(&m_jit);
                emitBigEndianCode();
                done.link(&m_jit);
            }

            break;
        }
        case 8: {
            // getFloat64.
            auto emitLittleEndianCode = [&] {
                m_jit.loadDouble(baseIndex, resultFPR);
            };

            auto emitBigEndianCode = [&] {
                m_jit.load64(baseIndex, t2);
                m_jit.byteSwap64(t2);
                m_jit.move64ToDouble(t2, resultFPR);
            };

            if (data.isLittleEndian == TrueTriState)
                emitLittleEndianCode();
            else if (data.isLittleEndian == FalseTriState)
                emitBigEndianCode();
            else {
                RELEASE_ASSERT(isLittleEndianGPR != InvalidGPRReg);
                auto isBigEndian = m_jit.branchTest32(MacroAssembler::Zero, isLittleEndianGPR, TrustedImm32(1));
                emitLittleEndianCode();
                auto done = m_jit.jump();
                isBigEndian.link(&m_jit);
                emitBigEndianCode();
                done.link(&m_jit);
            }

            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        doubleResult(resultFPR, node);
    }

    break;
}
4933 | |
// Date getters (getHours, getTime, ...) share one helper.
case DateGetInt32OrNaN:
case DateGetTime:
    compileDateGet(node);
    break;
4938 | |
// Inline DataView.prototype.set{Int,Uint,Float}* : bounds-check, then store
// byteSize bytes, byte-swapping when the effective endianness is big-endian.
// Varargs children: 0 = DataView, 1 = index, 2 = value, 3 (optional) = the
// runtime littleEndian flag (only when data.isLittleEndian is MixedTriState).
case DataViewSet: {
    SpeculateCellOperand dataView(this, m_graph.varArgChild(node, 0));
    GPRReg dataViewGPR = dataView.gpr();
    speculateDataViewObject(m_graph.varArgChild(node, 0), dataViewGPR);

    SpeculateInt32Operand index(this, m_graph.varArgChild(node, 1));
    GPRReg indexGPR = index.gpr();

    // The value operand's representation depends on the store type; exactly
    // one of these is materialized, per the switch below.
    Optional<SpeculateStrictInt52Operand> int52Value;
    Optional<SpeculateDoubleOperand> doubleValue;
    Optional<SpeculateInt32Operand> int32Value;
    Optional<FPRTemporary> fprTemporary;
    GPRReg valueGPR = InvalidGPRReg;
    FPRReg valueFPR = InvalidFPRReg;
    FPRReg tempFPR = InvalidFPRReg;

    DataViewData data = node->dataViewData();

    Edge& valueEdge = m_graph.varArgChild(node, 2);
    switch (valueEdge.useKind()) {
    case Int32Use:
        int32Value.emplace(this, valueEdge);
        valueGPR = int32Value->gpr();
        break;
    case DoubleRepUse:
        doubleValue.emplace(this, valueEdge);
        valueFPR = doubleValue->fpr();
        // setFloat32 needs a scratch FPR for the double->float narrowing.
        if (data.byteSize == 4) {
            fprTemporary.emplace(this);
            tempFPR = fprTemporary->fpr();
        }
        break;
    case Int52RepUse:
        int52Value.emplace(this, valueEdge);
        valueGPR = int52Value->gpr();
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }

    GPRTemporary temp1(this);
    GPRReg t1 = temp1.gpr();
    GPRTemporary temp2(this);
    GPRReg t2 = temp2.gpr();
    GPRTemporary temp3(this);
    GPRReg t3 = temp3.gpr();

    Optional<SpeculateBooleanOperand> isLittleEndianOperand;
    if (m_graph.varArgChild(node, 3))
        isLittleEndianOperand.emplace(this, m_graph.varArgChild(node, 3));
    GPRReg isLittleEndianGPR = isLittleEndianOperand ? isLittleEndianOperand->gpr() : InvalidGPRReg;

    // Bounds check: (zero-extended index) + (byteSize - 1) < length, compared
    // at 64 bits so the add cannot wrap.
    m_jit.zeroExtend32ToPtr(indexGPR, t2);
    if (data.byteSize > 1)
        m_jit.add64(TrustedImm32(data.byteSize - 1), t2);
    m_jit.load32(MacroAssembler::Address(dataViewGPR, JSArrayBufferView::offsetOfLength()), t1);
    speculationCheck(OutOfBounds, JSValueRegs(), node,
        m_jit.branch64(MacroAssembler::AboveOrEqual, t2, t1));

    m_jit.loadPtr(JITCompiler::Address(dataViewGPR, JSArrayBufferView::offsetOfVector()), t2);
    cageTypedArrayStorage(dataViewGPR, t2);

    m_jit.zeroExtend32ToPtr(indexGPR, t1);
    auto baseIndex = JITCompiler::BaseIndex(t2, t1, MacroAssembler::TimesOne);

    if (data.isFloatingPoint) {
        RELEASE_ASSERT(valueFPR != InvalidFPRReg);
        if (data.byteSize == 4) {
            // setFloat32: narrow the incoming double first.
            RELEASE_ASSERT(tempFPR != InvalidFPRReg);
            m_jit.convertDoubleToFloat(valueFPR, tempFPR);

            auto emitLittleEndianCode = [&] {
                m_jit.storeFloat(tempFPR, baseIndex);
            };

            auto emitBigEndianCode = [&] {
                // Swap via an integer register, then store the raw bits.
                m_jit.moveFloatTo32(tempFPR, t3);
                m_jit.byteSwap32(t3);
                m_jit.store32(t3, baseIndex);
            };

            // Endianness known at compile time emits one path; MixedTriState
            // branches on the runtime flag.
            if (data.isLittleEndian == FalseTriState)
                emitBigEndianCode();
            else if (data.isLittleEndian == TrueTriState)
                emitLittleEndianCode();
            else {
                RELEASE_ASSERT(isLittleEndianGPR != InvalidGPRReg);
                auto isBigEndian = m_jit.branchTest32(MacroAssembler::Zero, isLittleEndianGPR, TrustedImm32(1));
                emitLittleEndianCode();
                auto done = m_jit.jump();
                isBigEndian.link(&m_jit);
                emitBigEndianCode();
                done.link(&m_jit);
            }
        } else {
            // setFloat64.
            RELEASE_ASSERT(data.byteSize == 8);
            RELEASE_ASSERT(valueFPR != InvalidFPRReg);

            auto emitLittleEndianCode = [&] {
                m_jit.storeDouble(valueFPR, baseIndex);
            };
            auto emitBigEndianCode = [&] {
                m_jit.moveDoubleTo64(valueFPR, t3);
                m_jit.byteSwap64(t3);
                m_jit.store64(t3, baseIndex);
            };

            if (data.isLittleEndian == FalseTriState)
                emitBigEndianCode();
            else if (data.isLittleEndian == TrueTriState)
                emitLittleEndianCode();
            else {
                RELEASE_ASSERT(isLittleEndianGPR != InvalidGPRReg);
                auto isBigEndian = m_jit.branchTest32(MacroAssembler::Zero, isLittleEndianGPR, TrustedImm32(1));
                emitLittleEndianCode();
                auto done = m_jit.jump();
                isBigEndian.link(&m_jit);
                emitBigEndianCode();
                done.link(&m_jit);
            }
        }
    } else {
        switch (data.byteSize) {
        case 1:
            // Single bytes have no endianness to worry about.
            RELEASE_ASSERT(valueEdge.useKind() == Int32Use);
            RELEASE_ASSERT(valueGPR != InvalidGPRReg);
            m_jit.store8(valueGPR, baseIndex);
            break;
        case 2: {
            RELEASE_ASSERT(valueEdge.useKind() == Int32Use);
            RELEASE_ASSERT(valueGPR != InvalidGPRReg);

            auto emitLittleEndianCode = [&] {
                m_jit.store16(valueGPR, baseIndex);
            };
            auto emitBigEndianCode = [&] {
                m_jit.move(valueGPR, t3);
                m_jit.byteSwap16(t3);
                m_jit.store16(t3, baseIndex);
            };

            if (data.isLittleEndian == FalseTriState)
                emitBigEndianCode();
            else if (data.isLittleEndian == TrueTriState)
                emitLittleEndianCode();
            else {
                RELEASE_ASSERT(isLittleEndianGPR != InvalidGPRReg);
                auto isBigEndian = m_jit.branchTest32(MacroAssembler::Zero, isLittleEndianGPR, TrustedImm32(1));
                emitLittleEndianCode();
                auto done = m_jit.jump();
                isBigEndian.link(&m_jit);
                emitBigEndianCode();
                done.link(&m_jit);
            }
            break;
        }
        case 4: {
            // setInt32/setUint32; the value may arrive as Int32 or Int52
            // (only the low 32 bits are stored either way).
            RELEASE_ASSERT(valueEdge.useKind() == Int32Use || valueEdge.useKind() == Int52RepUse);

            auto emitLittleEndianCode = [&] {
                m_jit.store32(valueGPR, baseIndex);
            };

            auto emitBigEndianCode = [&] {
                m_jit.zeroExtend32ToPtr(valueGPR, t3);
                m_jit.byteSwap32(t3);
                m_jit.store32(t3, baseIndex);
            };

            if (data.isLittleEndian == FalseTriState)
                emitBigEndianCode();
            else if (data.isLittleEndian == TrueTriState)
                emitLittleEndianCode();
            else {
                RELEASE_ASSERT(isLittleEndianGPR != InvalidGPRReg);
                auto isBigEndian = m_jit.branchTest32(MacroAssembler::Zero, isLittleEndianGPR, TrustedImm32(1));
                emitLittleEndianCode();
                auto done = m_jit.jump();
                isBigEndian.link(&m_jit);
                emitBigEndianCode();
                done.link(&m_jit);
            }

            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    noResult(node);
    break;
}
5132 | |
5133 | #if ENABLE(FTL_JIT) |
case CheckTierUpInLoop: {
    // Bump the FTL tier-up counter; the branch is taken when the incremented
    // counter becomes non-negative, i.e. the tier-up threshold was crossed.
    MacroAssembler::Jump callTierUp = m_jit.branchAdd32(
        MacroAssembler::PositiveOrZero,
        TrustedImm32(Options::ftlTierUpCounterIncrementForLoop()),
        MacroAssembler::AbsoluteAddress(&m_jit.jitCode()->tierUpCounter.m_counter));

    // Resume point for the slow path after triggering tier-up.
    MacroAssembler::Label toNextOperation = m_jit.label();

    // Capture the spill plan now so the out-of-line lambda (emitted later)
    // spills/fills exactly the registers live at this point.
    Vector<SilentRegisterSavePlan> savePlans;
    silentSpillAllRegistersImpl(false, savePlans, InvalidGPRReg);
    BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();

    addSlowPathGeneratorLambda([=]() {
        callTierUp.link(&m_jit);

        silentSpill(savePlans);
        callOperation(operationTriggerTierUpNowInLoop, &vm(), TrustedImm32(bytecodeIndex.asBits()));
        silentFill(savePlans);

        // Tier-up only schedules FTL compilation; execution continues here.
        m_jit.jump().linkTo(toNextOperation, &m_jit);
    });
    break;
}
5157 | |
case CheckTierUpAtReturn: {
    // Bump the tier-up counter at function return; skip the trigger while the
    // incremented counter is still negative (threshold not yet crossed).
    MacroAssembler::Jump done = m_jit.branchAdd32(
        MacroAssembler::Signed,
        TrustedImm32(Options::ftlTierUpCounterIncrementForReturn()),
        MacroAssembler::AbsoluteAddress(&m_jit.jitCode()->tierUpCounter.m_counter));

    // Threshold crossed: ask the runtime to start an FTL compile, then
    // continue in this code (no OSR entry at return sites).
    silentSpillAllRegisters(InvalidGPRReg);
    callOperation(operationTriggerTierUpNow, &vm());
    silentFillAllRegisters();

    done.link(&m_jit);
    break;
}
5171 | |
case CheckTierUpAndOSREnter: {
    // Loop-header tier-up check that can also OSR-enter FTL code. Only valid
    // outside inlined frames, since OSR entry replaces the whole frame.
    ASSERT(!node->origin.semantic.inlineCallFrame());

    GPRTemporary temp(this);
    GPRReg tempGPR = temp.gpr();

    // Per-bytecode-index trigger byte the runtime can set to force entry.
    BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
    auto triggerIterator = m_jit.jitCode()->tierUpEntryTriggers.find(bytecodeIndex);
    DFG_ASSERT(m_jit.graph(), node, triggerIterator != m_jit.jitCode()->tierUpEntryTriggers.end());
    JITCode::TriggerReason* forceEntryTrigger = &(m_jit.jitCode()->tierUpEntryTriggers.find(bytecodeIndex)->value);
    static_assert(!static_cast<uint8_t>(JITCode::TriggerReason::DontTrigger), "the JIT code assumes non-zero means 'enter'" );
    static_assert(sizeof(JITCode::TriggerReason) == 1, "branchTest8 assumes this size" );

    // Take the slow path if entry is being forced, or if the tier-up counter
    // crosses zero after the increment.
    MacroAssembler::Jump forceOSREntry = m_jit.branchTest8(MacroAssembler::NonZero, MacroAssembler::AbsoluteAddress(forceEntryTrigger));
    MacroAssembler::Jump overflowedCounter = m_jit.branchAdd32(
        MacroAssembler::PositiveOrZero,
        TrustedImm32(Options::ftlTierUpCounterIncrementForLoop()),
        MacroAssembler::AbsoluteAddress(&m_jit.jitCode()->tierUpCounter.m_counter));
    MacroAssembler::Label toNextOperation = m_jit.label();

    // Capture the live-register spill plan for the out-of-line lambda below.
    Vector<SilentRegisterSavePlan> savePlans;
    silentSpillAllRegistersImpl(false, savePlans, tempGPR);

    // Record the variable-event stream position so OSR entry can reconstruct
    // the frame state at this bytecode index.
    unsigned streamIndex = m_stream->size();
    m_jit.jitCode()->bytecodeIndexToStreamIndex.add(bytecodeIndex, streamIndex);

    addSlowPathGeneratorLambda([=]() {
        forceOSREntry.link(&m_jit);
        overflowedCounter.link(&m_jit);

        silentSpill(savePlans);
        // Returns the OSR entry target in tempGPR, or null if we cannot (or
        // should not) enter yet.
        callOperation(operationTriggerOSREntryNow, tempGPR, &vm(), TrustedImm32(bytecodeIndex.asBits()));

        if (savePlans.isEmpty())
            m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR).linkTo(toNextOperation, &m_jit);
        else {
            // Null target: refill the spilled registers and resume inline.
            MacroAssembler::Jump osrEnter = m_jit.branchTestPtr(MacroAssembler::NonZero, tempGPR);
            silentFill(savePlans);
            m_jit.jump().linkTo(toNextOperation, &m_jit);
            osrEnter.link(&m_jit);
        }
        // Non-null target: leave this code for good — restore callee saves
        // and jump into the FTL entry thunk.
        m_jit.emitRestoreCalleeSaves();
        m_jit.farJump(tempGPR, GPRInfo::callFrameRegister);
    });
    break;
}
5218 | |
5219 | #else // ENABLE(FTL_JIT) |
5220 | case CheckTierUpInLoop: |
5221 | case CheckTierUpAtReturn: |
5222 | case CheckTierUpAndOSREnter: |
5223 | DFG_CRASH(m_jit.graph(), node, "Unexpected tier-up node" ); |
5224 | break; |
5225 | #endif // ENABLE(FTL_JIT) |
5226 | |
5227 | case FilterCallLinkStatus: |
5228 | case FilterGetByStatus: |
5229 | case FilterPutByIdStatus: |
5230 | case FilterInByIdStatus: |
5231 | m_interpreter.filterICStatus(node); |
5232 | noResult(node); |
5233 | break; |
5234 | |
5235 | case LastNodeType: |
5236 | case EntrySwitch: |
5237 | case InitializeEntrypointArguments: |
5238 | case Phi: |
5239 | case Upsilon: |
5240 | case ExtractOSREntryLocal: |
5241 | case CheckInBounds: |
5242 | case ArithIMul: |
5243 | case MultiGetByOffset: |
5244 | case MultiPutByOffset: |
5245 | case FiatInt52: |
5246 | case CheckBadCell: |
5247 | case BottomValue: |
5248 | case PhantomNewObject: |
5249 | case PhantomNewFunction: |
5250 | case PhantomNewGeneratorFunction: |
5251 | case PhantomNewAsyncFunction: |
5252 | case PhantomNewAsyncGeneratorFunction: |
5253 | case PhantomCreateActivation: |
5254 | case PhantomNewRegexp: |
5255 | case GetMyArgumentByVal: |
5256 | case GetMyArgumentByValOutOfBounds: |
5257 | case GetVectorLength: |
5258 | case PutHint: |
5259 | case CheckStructureImmediate: |
5260 | case MaterializeCreateActivation: |
5261 | case PutStack: |
5262 | case KillStack: |
5263 | case GetStack: |
5264 | case PhantomCreateRest: |
5265 | case PhantomSpread: |
5266 | case PhantomNewArrayWithSpread: |
5267 | case PhantomNewArrayBuffer: |
5268 | case IdentityWithProfile: |
5269 | case CPUIntrinsic: |
5270 | DFG_CRASH(m_jit.graph(), node, "Unexpected node" ); |
5271 | break; |
5272 | } |
5273 | |
5274 | if (!m_compileOkay) |
5275 | return; |
5276 | |
5277 | if (node->hasResult() && node->mustGenerate()) |
5278 | use(node); |
5279 | } |
5280 | |
// Loads the JSValue encoding of boolean `true` (ValueTrue) into gpr.
void SpeculativeJIT::moveTrueTo(GPRReg gpr)
{
    m_jit.move(TrustedImm32(JSValue::ValueTrue), gpr);
}
5285 | |
// Loads the JSValue encoding of boolean `false` (ValueFalse) into gpr.
void SpeculativeJIT::moveFalseTo(GPRReg gpr)
{
    m_jit.move(TrustedImm32(JSValue::ValueFalse), gpr);
}
5290 | |
// Turns a raw 0/1 machine boolean in gpr into a tagged JSValue boolean by
// OR-ing in the boolean tag bits (ValueFalse): a payload of 0 becomes
// ValueFalse and a payload of 1 becomes ValueTrue.
void SpeculativeJIT::blessBoolean(GPRReg gpr)
{
    m_jit.or32(TrustedImm32(JSValue::ValueFalse), gpr);
}
5295 | |
// Converts the boxed JSValue on valueEdge into an unboxed Int52 in resultGPR,
// speculating that the value is either an Int32 or a double holding an
// integral value (SpecInt32Only | SpecAnyIntAsDouble). Any other value takes
// the DFG_TYPE_CHECK OSR exit.
void SpeculativeJIT::convertAnyInt(Edge valueEdge, GPRReg resultGPR)
{
    JSValueOperand value(this, valueEdge, ManualOperandSpeculation);
    GPRReg valueGPR = value.gpr();

    // Fast path: a boxed Int32 only needs sign extension into the full register.
    JITCompiler::Jump notInt32 = m_jit.branchIfNotInt32(valueGPR);

    m_jit.signExtend32ToPtr(valueGPR, resultGPR);
    JITCompiler::Jump done = m_jit.jump();

    // Slow path: call out to convert a boxed double. The operation yields
    // JSValue::notInt52 when the double is not exactly representable as Int52,
    // which the type check below turns into a speculation failure.
    notInt32.link(&m_jit);
    silentSpillAllRegisters(resultGPR);
    callOperation(operationConvertBoxedDoubleToInt52, resultGPR, valueGPR);
    silentFillAllRegisters();

    DFG_TYPE_CHECK(
        JSValueRegs(valueGPR), valueEdge, SpecInt32Only | SpecAnyIntAsDouble,
        m_jit.branch64(
            JITCompiler::Equal, resultGPR,
            JITCompiler::TrustedImm64(JSValue::notInt52)));
    done.link(&m_jit);
}
5318 | |
5319 | void SpeculativeJIT::speculateAnyInt(Edge edge) |
5320 | { |
5321 | if (!needsTypeCheck(edge, SpecInt32Only | SpecAnyIntAsDouble)) |
5322 | return; |
5323 | |
5324 | GPRTemporary temp(this); |
5325 | convertAnyInt(edge, temp.gpr()); |
5326 | } |
5327 | |
// Speculates that the JSValue held in regs is a boxed Int32; any other value
// takes the DFG_TYPE_CHECK OSR exit.
void SpeculativeJIT::speculateInt32(Edge edge, JSValueRegs regs)
{
    DFG_TYPE_CHECK(regs, edge, SpecInt32Only, m_jit.branchIfNotInt32(regs));
}
5332 | |
// Speculates that a DoubleRep edge holds a double exactly representable as an
// Int52 (SpecAnyIntAsDouble). The conversion is performed by a C++ call; a
// result equal to JSValue::notInt52 means the double was not an integral
// value in range, and the type check OSR-exits.
void SpeculativeJIT::speculateDoubleRepAnyInt(Edge edge)
{
    if (!needsTypeCheck(edge, SpecAnyIntAsDouble))
        return;

    SpeculateDoubleOperand value(this, edge);
    FPRReg valueFPR = value.fpr();

    // The conversion is a call, so all live registers must be flushed first.
    flushRegisters();
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
    callOperation(operationConvertDoubleToInt52, resultGPR, valueFPR);

    DFG_TYPE_CHECK(
        JSValueRegs(), edge, SpecAnyIntAsDouble,
        m_jit.branch64(
            JITCompiler::Equal, resultGPR,
            JITCompiler::TrustedImm64(JSValue::notInt52)));
}
5352 | |
5353 | void SpeculativeJIT::compileArithRandom(Node* node) |
5354 | { |
5355 | JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); |
5356 | GPRTemporary temp1(this); |
5357 | GPRTemporary temp2(this); |
5358 | GPRTemporary temp3(this); |
5359 | FPRTemporary result(this); |
5360 | m_jit.emitRandomThunk(globalObject, temp1.gpr(), temp2.gpr(), temp3.gpr(), result.fpr()); |
5361 | doubleResult(result.fpr(), node); |
5362 | } |
5363 | |
// Implements String.prototype.codePointAt for a non-rope string: loads the
// UTF-16 (or Latin-1) code unit at `index`, and if it is a high surrogate
// followed by a low surrogate, combines the pair into a single code point.
void SpeculativeJIT::compileStringCodePointAt(Node* node)
{
    // We emit CheckArray on this node as we do in StringCharCodeAt node so that we do not need to check SpecString here.
    // And CheckArray also ensures that this String is not a rope.
    SpeculateCellOperand string(this, node->child1());
    SpeculateStrictInt32Operand index(this, node->child2());
    StorageOperand storage(this, node->child3());
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);
    GPRTemporary scratch3(this);

    GPRReg stringGPR = string.gpr();
    GPRReg indexGPR = index.gpr();
    GPRReg storageGPR = storage.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();
    GPRReg scratch3GPR = scratch3.gpr();

    // scratch1GPR <- StringImpl*, scratch2GPR <- string length.
    m_jit.loadPtr(CCallHelpers::Address(stringGPR, JSString::offsetOfValue()), scratch1GPR);
    m_jit.load32(CCallHelpers::Address(scratch1GPR, StringImpl::lengthMemoryOffset()), scratch2GPR);

    // unsigned comparison so we can filter out negative indices and indices that are too large
    speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(CCallHelpers::AboveOrEqual, indexGPR, scratch2GPR));

    // Load the character into scratch1GPR
    auto is16Bit = m_jit.branchTest32(CCallHelpers::Zero, CCallHelpers::Address(scratch1GPR, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    CCallHelpers::JumpList done;

    // 8-bit (Latin-1) string: a single byte is always a full code point.
    m_jit.load8(CCallHelpers::BaseIndex(storageGPR, indexGPR, CCallHelpers::TimesOne, 0), scratch1GPR);
    done.append(m_jit.jump());

    // 16-bit string: scratch1GPR holds the lead code unit.
    is16Bit.link(&m_jit);
    m_jit.load16(CCallHelpers::BaseIndex(storageGPR, indexGPR, CCallHelpers::TimesTwo, 0), scratch1GPR);
    // This is ok. indexGPR must be positive int32_t here and adding 1 never causes overflow if we treat indexGPR as uint32_t.
    m_jit.add32(CCallHelpers::TrustedImm32(1), indexGPR, scratch3GPR);
    // If index + 1 is past the end, there is no trail unit: return the lead unit as-is.
    done.append(m_jit.branch32(CCallHelpers::AboveOrEqual, scratch3GPR, scratch2GPR));
    // Masking with 0xfffffc00 isolates the surrogate range: lead must be in
    // 0xD800..0xDBFF (i.e. masked value == 0xD800) to be a high surrogate.
    m_jit.and32(CCallHelpers::TrustedImm32(0xfffffc00), scratch1GPR, scratch2GPR);
    done.append(m_jit.branch32(CCallHelpers::NotEqual, scratch2GPR, CCallHelpers::TrustedImm32(0xd800)));
    // Load the next unit; it must be a low surrogate (0xDC00..0xDFFF).
    m_jit.load16(CCallHelpers::BaseIndex(storageGPR, scratch3GPR, CCallHelpers::TimesTwo, 0), scratch3GPR);
    m_jit.and32(CCallHelpers::TrustedImm32(0xfffffc00), scratch3GPR, scratch2GPR);
    done.append(m_jit.branch32(CCallHelpers::NotEqual, scratch2GPR, CCallHelpers::TrustedImm32(0xdc00)));
    // Combine the pair: (lead << 10) + trail - U16_SURROGATE_OFFSET, computed
    // with a single effective-address instruction.
    m_jit.lshift32(CCallHelpers::TrustedImm32(10), scratch1GPR);
    m_jit.getEffectiveAddress(CCallHelpers::BaseIndex(scratch1GPR, scratch3GPR, CCallHelpers::TimesOne, -U16_SURROGATE_OFFSET), scratch1GPR);
    done.link(&m_jit);

    int32Result(scratch1GPR, m_currentNode);
}
5412 | |
// Emits inline fast paths for the Date.prototype getter intrinsics on a
// speculated DateInstance. Most getters read a field of the cached
// GregorianDateTime stored in the instance's DateInstanceData, validating the
// cache against the date's internal milliseconds value and falling back to a
// C++ operation when the cache is missing or stale.
void SpeculativeJIT::compileDateGet(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    GPRReg baseGPR = base.gpr();
    speculateDateObject(node->child1(), baseGPR);

    // Shared emitter: if the DateInstanceData exists and the cached-for-ms
    // double at cachedDoubleOffset equals the date's internal number, load the
    // int32 field at cachedDataOffset, let `callback` post-process it, and box
    // it; otherwise take the slow path through `operation`.
    auto emitGetCodeWithCallback = [&] (ptrdiff_t cachedDoubleOffset, ptrdiff_t cachedDataOffset, auto* operation, auto callback) {
        JSValueRegsTemporary result(this);
        FPRTemporary temp1(this);
        FPRTemporary temp2(this);

        JSValueRegs resultRegs = result.regs();
        FPRReg temp1FPR = temp1.fpr();
        FPRReg temp2FPR = temp2.fpr();

        CCallHelpers::JumpList slowCases;

        // resultRegs.payloadGPR() temporarily holds the DateInstanceData*.
        m_jit.loadPtr(CCallHelpers::Address(baseGPR, DateInstance::offsetOfData()), resultRegs.payloadGPR());
        slowCases.append(m_jit.branchTestPtr(CCallHelpers::Zero, resultRegs.payloadGPR()));
        // Cache is valid only when its stamp matches the internal number.
        // DoubleNotEqualOrUnordered also sends NaN dates to the slow path.
        m_jit.loadDouble(CCallHelpers::Address(baseGPR, DateInstance::offsetOfInternalNumber()), temp1FPR);
        m_jit.loadDouble(CCallHelpers::Address(resultRegs.payloadGPR(), cachedDoubleOffset), temp2FPR);
        slowCases.append(m_jit.branchDouble(CCallHelpers::DoubleNotEqualOrUnordered, temp1FPR, temp2FPR));
        m_jit.load32(CCallHelpers::Address(resultRegs.payloadGPR(), cachedDataOffset), resultRegs.payloadGPR());
        callback(resultRegs.payloadGPR());
        m_jit.boxInt32(resultRegs.payloadGPR(), resultRegs);

        addSlowPathGenerator(slowPathCall(slowCases, this, operation, resultRegs, &vm(), baseGPR));

        jsValueResult(resultRegs, node);
    };

    // Common case: no post-processing of the cached field.
    auto emitGetCode = [&] (ptrdiff_t cachedDoubleOffset, ptrdiff_t cachedDataOffset, auto* operation) {
        emitGetCodeWithCallback(cachedDoubleOffset, cachedDataOffset, operation, [] (GPRReg) { });
    };

    switch (node->intrinsic()) {
    case DatePrototypeGetTimeIntrinsic: {
        // getTime() is just the internal milliseconds number, returned as a double.
        FPRTemporary result(this);
        FPRReg resultFPR = result.fpr();
        m_jit.loadDouble(CCallHelpers::Address(baseGPR, DateInstance::offsetOfInternalNumber()), resultFPR);
        doubleResult(resultFPR, node);
        break;
    }

    // We do not have any timezone offset which affects on milliseconds.
    // So Date#getMilliseconds and Date#getUTCMilliseconds have the same implementation.
    case DatePrototypeGetMillisecondsIntrinsic:
    case DatePrototypeGetUTCMillisecondsIntrinsic: {
        JSValueRegsTemporary result(this);
        FPRTemporary temp1(this);
        FPRTemporary temp2(this);
        FPRTemporary temp3(this);
        JSValueRegs resultRegs = result.regs();
        FPRReg temp1FPR = temp1.fpr();
        FPRReg temp2FPR = temp2.fpr();
        FPRReg temp3FPR = temp3.fpr();

        // An invalid date (NaN internal number) yields NaN.
        m_jit.moveTrustedValue(jsNaN(), resultRegs);
        m_jit.loadDouble(CCallHelpers::Address(baseGPR, DateInstance::offsetOfInternalNumber()), temp1FPR);
        auto isNaN = m_jit.branchIfNaN(temp1FPR);

        // milliseconds = ms - floor(ms / msPerSecond) * msPerSecond,
        // i.e. the remainder of the internal number modulo one second.
        static const double msPerSecondConstant = msPerSecond;
        m_jit.loadDouble(TrustedImmPtr(&msPerSecondConstant), temp2FPR);
        m_jit.divDouble(temp1FPR, temp2FPR, temp3FPR);
        m_jit.floorDouble(temp3FPR, temp3FPR);
        m_jit.mulDouble(temp3FPR, temp2FPR, temp3FPR);
        m_jit.subDouble(temp1FPR, temp3FPR, temp1FPR);
        m_jit.truncateDoubleToInt32(temp1FPR, resultRegs.payloadGPR());
        m_jit.boxInt32(resultRegs.payloadGPR(), resultRegs);

        isNaN.link(&m_jit);
        jsValueResult(resultRegs, node);
        break;
    }

    // Local-time getters use the local-time cache; UTC getters use the UTC cache.
    case DatePrototypeGetFullYearIntrinsic:
        emitGetCode(DateInstanceData::offsetOfGregorianDateTimeCachedForMS(), DateInstanceData::offsetOfCachedGregorianDateTime() + GregorianDateTime::offsetOfYear(), operationDateGetFullYear);
        break;
    case DatePrototypeGetUTCFullYearIntrinsic:
        emitGetCode(DateInstanceData::offsetOfGregorianDateTimeUTCCachedForMS(), DateInstanceData::offsetOfCachedGregorianDateTimeUTC() + GregorianDateTime::offsetOfYear(), operationDateGetUTCFullYear);
        break;
    case DatePrototypeGetMonthIntrinsic:
        emitGetCode(DateInstanceData::offsetOfGregorianDateTimeCachedForMS(), DateInstanceData::offsetOfCachedGregorianDateTime() + GregorianDateTime::offsetOfMonth(), operationDateGetMonth);
        break;
    case DatePrototypeGetUTCMonthIntrinsic:
        emitGetCode(DateInstanceData::offsetOfGregorianDateTimeUTCCachedForMS(), DateInstanceData::offsetOfCachedGregorianDateTimeUTC() + GregorianDateTime::offsetOfMonth(), operationDateGetUTCMonth);
        break;
    case DatePrototypeGetDateIntrinsic:
        emitGetCode(DateInstanceData::offsetOfGregorianDateTimeCachedForMS(), DateInstanceData::offsetOfCachedGregorianDateTime() + GregorianDateTime::offsetOfMonthDay(), operationDateGetDate);
        break;
    case DatePrototypeGetUTCDateIntrinsic:
        emitGetCode(DateInstanceData::offsetOfGregorianDateTimeUTCCachedForMS(), DateInstanceData::offsetOfCachedGregorianDateTimeUTC() + GregorianDateTime::offsetOfMonthDay(), operationDateGetUTCDate);
        break;
    case DatePrototypeGetDayIntrinsic:
        emitGetCode(DateInstanceData::offsetOfGregorianDateTimeCachedForMS(), DateInstanceData::offsetOfCachedGregorianDateTime() + GregorianDateTime::offsetOfWeekDay(), operationDateGetDay);
        break;
    case DatePrototypeGetUTCDayIntrinsic:
        emitGetCode(DateInstanceData::offsetOfGregorianDateTimeUTCCachedForMS(), DateInstanceData::offsetOfCachedGregorianDateTimeUTC() + GregorianDateTime::offsetOfWeekDay(), operationDateGetUTCDay);
        break;
    case DatePrototypeGetHoursIntrinsic:
        emitGetCode(DateInstanceData::offsetOfGregorianDateTimeCachedForMS(), DateInstanceData::offsetOfCachedGregorianDateTime() + GregorianDateTime::offsetOfHour(), operationDateGetHours);
        break;
    case DatePrototypeGetUTCHoursIntrinsic:
        emitGetCode(DateInstanceData::offsetOfGregorianDateTimeUTCCachedForMS(), DateInstanceData::offsetOfCachedGregorianDateTimeUTC() + GregorianDateTime::offsetOfHour(), operationDateGetUTCHours);
        break;
    case DatePrototypeGetMinutesIntrinsic:
        emitGetCode(DateInstanceData::offsetOfGregorianDateTimeCachedForMS(), DateInstanceData::offsetOfCachedGregorianDateTime() + GregorianDateTime::offsetOfMinute(), operationDateGetMinutes);
        break;
    case DatePrototypeGetUTCMinutesIntrinsic:
        emitGetCode(DateInstanceData::offsetOfGregorianDateTimeUTCCachedForMS(), DateInstanceData::offsetOfCachedGregorianDateTimeUTC() + GregorianDateTime::offsetOfMinute(), operationDateGetUTCMinutes);
        break;
    case DatePrototypeGetSecondsIntrinsic:
        emitGetCode(DateInstanceData::offsetOfGregorianDateTimeCachedForMS(), DateInstanceData::offsetOfCachedGregorianDateTime() + GregorianDateTime::offsetOfSecond(), operationDateGetSeconds);
        break;
    case DatePrototypeGetUTCSecondsIntrinsic:
        emitGetCode(DateInstanceData::offsetOfGregorianDateTimeUTCCachedForMS(), DateInstanceData::offsetOfCachedGregorianDateTimeUTC() + GregorianDateTime::offsetOfSecond(), operationDateGetUTCSeconds);
        break;

    case DatePrototypeGetTimezoneOffsetIntrinsic: {
        // getTimezoneOffset() returns minutes *behind* UTC, so negate the
        // cached UTC-offset-in-minutes field.
        emitGetCodeWithCallback(DateInstanceData::offsetOfGregorianDateTimeCachedForMS(), DateInstanceData::offsetOfCachedGregorianDateTime() + GregorianDateTime::offsetOfUTCOffsetInMinute(), operationDateGetTimezoneOffset, [&] (GPRReg offsetGPR) {
            m_jit.neg32(offsetGPR);
        });
        break;
    }

    case DatePrototypeGetYearIntrinsic: {
        // Legacy getYear() is the full year minus 1900 (Annex B).
        emitGetCodeWithCallback(DateInstanceData::offsetOfGregorianDateTimeCachedForMS(), DateInstanceData::offsetOfCachedGregorianDateTime() + GregorianDateTime::offsetOfYear(), operationDateGetYear, [&] (GPRReg yearGPR) {
            m_jit.sub32(TrustedImm32(1900), yearGPR);
        });
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
5549 | |
5550 | #endif |
5551 | |
5552 | } } // namespace JSC::DFG |
5553 | |
5554 | #endif |
5555 | |