1 | /* |
2 | * Copyright (C) 2011-2017 Apple Inc. All rights reserved. |
3 | * Copyright (C) 2011 Intel Corporation. All rights reserved. |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
8 | * 1. Redistributions of source code must retain the above copyright |
9 | * notice, this list of conditions and the following disclaimer. |
10 | * 2. Redistributions in binary form must reproduce the above copyright |
11 | * notice, this list of conditions and the following disclaimer in the |
12 | * documentation and/or other materials provided with the distribution. |
13 | * |
14 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
15 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
17 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
18 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
19 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
20 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
21 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
22 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
24 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | */ |
26 | |
27 | #include "config.h" |
28 | #include "DFGSpeculativeJIT.h" |
29 | |
30 | #if ENABLE(DFG_JIT) |
31 | |
32 | #include "ArrayPrototype.h" |
33 | #include "CallFrameShuffler.h" |
34 | #include "DFGAbstractInterpreterInlines.h" |
35 | #include "DFGCallArrayAllocatorSlowPathGenerator.h" |
36 | #include "DFGOperations.h" |
37 | #include "DFGSlowPathGenerator.h" |
38 | #include "DirectArguments.h" |
39 | #include "GetterSetter.h" |
40 | #include "HasOwnPropertyCache.h" |
41 | #include "HashMapImpl.h" |
#include "JSCInlines.h"
#include "JSLexicalEnvironment.h"
#include "JSPropertyNameEnumerator.h"
#include "ObjectPrototype.h"
46 | #include "SetupVarargsFrame.h" |
47 | #include "SuperSampler.h" |
48 | #include "Watchdog.h" |
49 | |
50 | namespace JSC { namespace DFG { |
51 | |
52 | #if USE(JSVALUE32_64) |
53 | |
static_assert(SpecCellCheck == SpecCell, "This is strongly assumed in the 32-bit DFG backend.");
55 | |
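// Fills the JSValue for this edge into a tag/payload register pair, loading it from its
// constant, its spill slot, or its current (possibly unboxed) register as appropriate.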
56 | bool SpeculativeJIT::fillJSValue(Edge edge, GPRReg& tagGPR, GPRReg& payloadGPR, FPRReg& fpr) |
57 | { |
// FIXME: For double we could fill with an FPR.
59 | UNUSED_PARAM(fpr); |
60 | |
61 | VirtualRegister virtualRegister = edge->virtualRegister(); |
62 | GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); |
63 | |
64 | switch (info.registerFormat()) { |
65 | case DataFormatNone: { |
66 | |
67 | if (edge->hasConstant()) { |
68 | tagGPR = allocate(); |
69 | payloadGPR = allocate(); |
70 | JSValue value = edge->asJSValue(); |
71 | m_jit.move(Imm32(value.tag()), tagGPR); |
72 | m_jit.move(Imm32(value.payload()), payloadGPR); |
73 | m_gprs.retain(tagGPR, virtualRegister, SpillOrderConstant); |
74 | m_gprs.retain(payloadGPR, virtualRegister, SpillOrderConstant); |
75 | info.fillJSValue(*m_stream, tagGPR, payloadGPR, DataFormatJS); |
76 | } else { |
77 | DataFormat spillFormat = info.spillFormat(); |
78 | ASSERT(spillFormat != DataFormatNone && spillFormat != DataFormatStorage); |
79 | tagGPR = allocate(); |
80 | payloadGPR = allocate(); |
81 | switch (spillFormat) { |
82 | case DataFormatInt32: |
83 | m_jit.move(TrustedImm32(JSValue::Int32Tag), tagGPR); |
84 | spillFormat = DataFormatJSInt32; // This will be used as the new register format. |
85 | break; |
86 | case DataFormatCell: |
87 | m_jit.move(TrustedImm32(JSValue::CellTag), tagGPR); |
88 | spillFormat = DataFormatJSCell; // This will be used as the new register format. |
89 | break; |
90 | case DataFormatBoolean: |
91 | m_jit.move(TrustedImm32(JSValue::BooleanTag), tagGPR); |
92 | spillFormat = DataFormatJSBoolean; // This will be used as the new register format. |
93 | break; |
94 | default: |
95 | m_jit.load32(JITCompiler::tagFor(virtualRegister), tagGPR); |
96 | break; |
97 | } |
98 | m_jit.load32(JITCompiler::payloadFor(virtualRegister), payloadGPR); |
99 | m_gprs.retain(tagGPR, virtualRegister, SpillOrderSpilled); |
100 | m_gprs.retain(payloadGPR, virtualRegister, SpillOrderSpilled); |
101 | info.fillJSValue(*m_stream, tagGPR, payloadGPR, spillFormat == DataFormatJSDouble ? DataFormatJS : spillFormat); |
102 | } |
103 | |
104 | return true; |
105 | } |
106 | |
107 | case DataFormatInt32: |
108 | case DataFormatCell: |
109 | case DataFormatBoolean: { |
110 | GPRReg gpr = info.gpr(); |
// If the register has already been locked, we need to take a copy.
112 | if (m_gprs.isLocked(gpr)) { |
113 | payloadGPR = allocate(); |
114 | m_jit.move(gpr, payloadGPR); |
115 | } else { |
116 | payloadGPR = gpr; |
117 | m_gprs.lock(gpr); |
118 | } |
119 | tagGPR = allocate(); |
120 | int32_t tag = JSValue::EmptyValueTag; |
121 | DataFormat fillFormat = DataFormatJS; |
122 | switch (info.registerFormat()) { |
123 | case DataFormatInt32: |
124 | tag = JSValue::Int32Tag; |
125 | fillFormat = DataFormatJSInt32; |
126 | break; |
127 | case DataFormatCell: |
128 | tag = JSValue::CellTag; |
129 | fillFormat = DataFormatJSCell; |
130 | break; |
131 | case DataFormatBoolean: |
132 | tag = JSValue::BooleanTag; |
133 | fillFormat = DataFormatJSBoolean; |
134 | break; |
135 | default: |
136 | RELEASE_ASSERT_NOT_REACHED(); |
137 | break; |
138 | } |
139 | m_jit.move(TrustedImm32(tag), tagGPR); |
140 | m_gprs.release(gpr); |
141 | m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS); |
142 | m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS); |
143 | info.fillJSValue(*m_stream, tagGPR, payloadGPR, fillFormat); |
144 | return true; |
145 | } |
146 | |
147 | case DataFormatJSDouble: |
148 | case DataFormatJS: |
149 | case DataFormatJSInt32: |
150 | case DataFormatJSCell: |
151 | case DataFormatJSBoolean: { |
152 | tagGPR = info.tagGPR(); |
153 | payloadGPR = info.payloadGPR(); |
154 | m_gprs.lock(tagGPR); |
155 | m_gprs.lock(payloadGPR); |
156 | return true; |
157 | } |
158 | |
159 | case DataFormatStorage: |
160 | case DataFormatDouble: |
161 | // this type currently never occurs |
162 | RELEASE_ASSERT_NOT_REACHED(); |
163 | |
164 | default: |
165 | RELEASE_ASSERT_NOT_REACHED(); |
166 | return true; |
167 | } |
168 | } |
169 | |
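// Emits an inline-cache fast path (JITGetByIdGenerator) for get_by_id, plus a slow-path call
// to the appropriate optimizing get-by-id operation when the cache misses.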
void SpeculativeJIT::cachedGetById(CodeOrigin origin, JSValueRegs base, JSValueRegs result, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode mode, AccessType type)
171 | { |
172 | cachedGetById(origin, base.tagGPR(), base.payloadGPR(), result.tagGPR(), result.payloadGPR(), identifierNumber, slowPathTarget, mode, type); |
173 | } |
174 | |
175 | void SpeculativeJIT::cachedGetById( |
176 | CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, |
177 | unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode, AccessType type) |
178 | { |
// This is a hacky fix for when the register allocator decides to alias the base payload with the result tag. This only happens
// in the case of GetByIdFlush/GetByIdDirectFlush, which already has a relatively expensive register allocation story, so one
// extra move instruction should not matter.
182 | if (basePayloadGPR == resultTagGPR) { |
183 | RELEASE_ASSERT(basePayloadGPR != resultPayloadGPR); |
184 | |
185 | if (baseTagGPROrNone == resultPayloadGPR) { |
186 | m_jit.swap(basePayloadGPR, baseTagGPROrNone); |
187 | baseTagGPROrNone = resultTagGPR; |
188 | } else |
189 | m_jit.move(basePayloadGPR, resultPayloadGPR); |
190 | basePayloadGPR = resultPayloadGPR; |
191 | } |
192 | |
193 | RegisterSet usedRegisters = this->usedRegisters(); |
194 | if (spillMode == DontSpill) { |
// We've already flushed registers to the stack, so we don't need to spill these.
196 | usedRegisters.set(JSValueRegs(baseTagGPROrNone, basePayloadGPR), false); |
197 | usedRegisters.set(JSValueRegs(resultTagGPR, resultPayloadGPR), false); |
198 | } |
199 | |
200 | CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size()); |
201 | JITGetByIdGenerator gen( |
202 | m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber), |
203 | JSValueRegs(baseTagGPROrNone, basePayloadGPR), JSValueRegs(resultTagGPR, resultPayloadGPR), type); |
204 | |
205 | gen.generateFastPath(m_jit); |
206 | |
207 | JITCompiler::JumpList slowCases; |
208 | if (slowPathTarget.isSet()) |
209 | slowCases.append(slowPathTarget); |
210 | slowCases.append(gen.slowPathJump()); |
211 | |
212 | std::unique_ptr<SlowPathGenerator> slowPath; |
213 | if (baseTagGPROrNone == InvalidGPRReg) { |
214 | slowPath = slowPathCall( |
215 | slowCases, this, appropriateOptimizingGetByIdFunction(type), |
216 | JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(), |
217 | CCallHelpers::CellValue(basePayloadGPR), |
218 | identifierUID(identifierNumber)); |
219 | } else { |
220 | slowPath = slowPathCall( |
221 | slowCases, this, appropriateOptimizingGetByIdFunction(type), |
222 | JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(), JSValueRegs(baseTagGPROrNone, basePayloadGPR), identifierUID(identifierNumber)); |
223 | } |
224 | |
225 | m_jit.addGetById(gen, slowPath.get()); |
226 | addSlowPathGenerator(WTFMove(slowPath)); |
227 | } |
228 | |
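// As cachedGetById, but for get_by_id_with_this: the base and this values are passed
// separately and the slow path calls operationGetByIdWithThisOptimize.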
229 | void SpeculativeJIT::cachedGetByIdWithThis( |
230 | CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg thisTagGPR, GPRReg thisPayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, |
231 | unsigned identifierNumber, const JITCompiler::JumpList& slowPathTarget) |
232 | { |
233 | RegisterSet usedRegisters = this->usedRegisters(); |
234 | |
235 | CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size()); |
236 | JITGetByIdWithThisGenerator gen( |
237 | m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber), |
238 | JSValueRegs(resultTagGPR, resultPayloadGPR), JSValueRegs(baseTagGPROrNone, basePayloadGPR), JSValueRegs(thisTagGPR, thisPayloadGPR), AccessType::GetWithThis); |
239 | |
240 | gen.generateFastPath(m_jit); |
241 | |
242 | JITCompiler::JumpList slowCases; |
243 | if (!slowPathTarget.empty()) |
244 | slowCases.append(slowPathTarget); |
245 | slowCases.append(gen.slowPathJump()); |
246 | |
247 | std::unique_ptr<SlowPathGenerator> slowPath; |
248 | if (baseTagGPROrNone == InvalidGPRReg && thisTagGPR == InvalidGPRReg) { |
249 | slowPath = slowPathCall( |
250 | slowCases, this, operationGetByIdWithThisOptimize, |
251 | JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(), |
252 | CCallHelpers::CellValue(basePayloadGPR), |
253 | CCallHelpers::CellValue(thisPayloadGPR), |
254 | identifierUID(identifierNumber)); |
255 | } else { |
256 | ASSERT(baseTagGPROrNone != InvalidGPRReg); |
257 | ASSERT(thisTagGPR != InvalidGPRReg); |
258 | |
259 | slowPath = slowPathCall( |
260 | slowCases, this, operationGetByIdWithThisOptimize, |
261 | JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(), JSValueRegs(baseTagGPROrNone, basePayloadGPR), JSValueRegs(thisTagGPR, thisPayloadGPR), identifierUID(identifierNumber)); |
262 | } |
263 | |
264 | m_jit.addGetByIdWithThis(gen, slowPath.get()); |
265 | addSlowPathGenerator(WTFMove(slowPath)); |
266 | } |
267 | |
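// Produces a boolean answering whether the operand compares equal to null/undefined. A cell
// only does so if it masquerades as undefined from the current code origin's global object.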
268 | void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined(Edge operand) |
269 | { |
270 | JSValueOperand arg(this, operand, ManualOperandSpeculation); |
271 | GPRReg argTagGPR = arg.tagGPR(); |
272 | GPRReg argPayloadGPR = arg.payloadGPR(); |
273 | |
274 | GPRTemporary resultPayload(this, Reuse, arg, PayloadWord); |
275 | GPRReg resultPayloadGPR = resultPayload.gpr(); |
276 | |
277 | JITCompiler::Jump notCell; |
278 | JITCompiler::Jump notMasqueradesAsUndefined; |
279 | if (masqueradesAsUndefinedWatchpointIsStillValid()) { |
280 | if (!isKnownCell(operand.node())) |
281 | notCell = m_jit.branchIfNotCell(arg.jsValueRegs()); |
282 | |
283 | m_jit.move(TrustedImm32(0), resultPayloadGPR); |
284 | notMasqueradesAsUndefined = m_jit.jump(); |
285 | } else { |
286 | GPRTemporary localGlobalObject(this); |
287 | GPRTemporary remoteGlobalObject(this); |
288 | |
289 | if (!isKnownCell(operand.node())) |
290 | notCell = m_jit.branchIfNotCell(arg.jsValueRegs()); |
291 | |
292 | JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8( |
293 | JITCompiler::NonZero, |
294 | JITCompiler::Address(argPayloadGPR, JSCell::typeInfoFlagsOffset()), |
295 | JITCompiler::TrustedImm32(MasqueradesAsUndefined)); |
296 | |
297 | m_jit.move(TrustedImm32(0), resultPayloadGPR); |
298 | notMasqueradesAsUndefined = m_jit.jump(); |
299 | |
300 | isMasqueradesAsUndefined.link(&m_jit); |
301 | GPRReg localGlobalObjectGPR = localGlobalObject.gpr(); |
302 | GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr(); |
303 | m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR); |
304 | m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureIDOffset()), resultPayloadGPR); |
305 | m_jit.loadPtr(JITCompiler::Address(resultPayloadGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR); |
306 | m_jit.compare32(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultPayloadGPR); |
307 | } |
308 | |
309 | if (!isKnownCell(operand.node())) { |
310 | JITCompiler::Jump done = m_jit.jump(); |
311 | |
312 | notCell.link(&m_jit); |
313 | // null or undefined? |
314 | COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag); |
315 | m_jit.or32(TrustedImm32(1), argTagGPR, resultPayloadGPR); |
316 | m_jit.compare32(JITCompiler::Equal, resultPayloadGPR, TrustedImm32(JSValue::NullTag), resultPayloadGPR); |
317 | |
318 | done.link(&m_jit); |
319 | } |
320 | |
321 | notMasqueradesAsUndefined.link(&m_jit); |
322 | |
323 | booleanResult(resultPayloadGPR, m_currentNode); |
324 | } |
325 | |
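// Branch-fused variant of the compare above: jumps straight to the taken/notTaken blocks
// instead of materializing a boolean.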
326 | void SpeculativeJIT::nonSpeculativePeepholeBranchNullOrUndefined(Edge operand, Node* branchNode) |
327 | { |
328 | BasicBlock* taken = branchNode->branchData()->taken.block; |
329 | BasicBlock* notTaken = branchNode->branchData()->notTaken.block; |
330 | |
331 | bool invert = false; |
332 | if (taken == nextBlock()) { |
333 | invert = !invert; |
334 | BasicBlock* tmp = taken; |
335 | taken = notTaken; |
336 | notTaken = tmp; |
337 | } |
338 | |
339 | JSValueOperand arg(this, operand, ManualOperandSpeculation); |
340 | GPRReg argTagGPR = arg.tagGPR(); |
341 | GPRReg argPayloadGPR = arg.payloadGPR(); |
342 | |
343 | GPRTemporary result(this, Reuse, arg, TagWord); |
344 | GPRReg resultGPR = result.gpr(); |
345 | |
346 | JITCompiler::Jump notCell; |
347 | |
348 | if (masqueradesAsUndefinedWatchpointIsStillValid()) { |
349 | if (!isKnownCell(operand.node())) |
350 | notCell = m_jit.branchIfNotCell(arg.jsValueRegs()); |
351 | |
352 | jump(invert ? taken : notTaken, ForceJump); |
353 | } else { |
354 | GPRTemporary localGlobalObject(this); |
355 | GPRTemporary remoteGlobalObject(this); |
356 | |
357 | if (!isKnownCell(operand.node())) |
358 | notCell = m_jit.branchIfNotCell(arg.jsValueRegs()); |
359 | |
360 | branchTest8(JITCompiler::Zero, |
361 | JITCompiler::Address(argPayloadGPR, JSCell::typeInfoFlagsOffset()), |
362 | JITCompiler::TrustedImm32(MasqueradesAsUndefined), |
363 | invert ? taken : notTaken); |
364 | |
365 | GPRReg localGlobalObjectGPR = localGlobalObject.gpr(); |
366 | GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr(); |
367 | m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR); |
368 | m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureIDOffset()), resultGPR); |
369 | m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR); |
370 | branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, invert ? notTaken : taken); |
371 | } |
372 | |
373 | if (!isKnownCell(operand.node())) { |
374 | jump(notTaken, ForceJump); |
375 | |
376 | notCell.link(&m_jit); |
377 | // null or undefined? |
378 | COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag); |
379 | m_jit.or32(TrustedImm32(1), argTagGPR, resultGPR); |
380 | branch32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(JSValue::NullTag), taken); |
381 | } |
382 | |
383 | jump(notTaken); |
384 | } |
385 | |
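// Branch-fused strict equality. When both operands are known cells, pointer equality decides
// the fast case; everything else falls back to a call to the strict-equality operation.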
386 | void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert) |
387 | { |
388 | BasicBlock* taken = branchNode->branchData()->taken.block; |
389 | BasicBlock* notTaken = branchNode->branchData()->notTaken.block; |
390 | |
391 | // The branch instruction will branch to the taken block. |
392 | // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through. |
393 | if (taken == nextBlock()) { |
394 | invert = !invert; |
395 | BasicBlock* tmp = taken; |
396 | taken = notTaken; |
397 | notTaken = tmp; |
398 | } |
399 | |
400 | JSValueOperand arg1(this, node->child1()); |
401 | JSValueOperand arg2(this, node->child2()); |
402 | GPRReg arg1PayloadGPR = arg1.payloadGPR(); |
403 | GPRReg arg2PayloadGPR = arg2.payloadGPR(); |
404 | JSValueRegs arg1Regs = arg1.jsValueRegs(); |
405 | JSValueRegs arg2Regs = arg2.jsValueRegs(); |
406 | |
407 | GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord); |
408 | GPRReg resultPayloadGPR = resultPayload.gpr(); |
409 | |
410 | arg1.use(); |
411 | arg2.use(); |
412 | |
413 | if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) { |
414 | // see if we get lucky: if the arguments are cells and they reference the same |
415 | // cell, then they must be strictly equal. |
416 | branchPtr(JITCompiler::Equal, arg1PayloadGPR, arg2PayloadGPR, invert ? notTaken : taken); |
417 | |
418 | silentSpillAllRegisters(resultPayloadGPR); |
419 | callOperation(operationCompareStrictEqCell, resultPayloadGPR, arg1PayloadGPR, arg2PayloadGPR); |
420 | m_jit.exceptionCheck(); |
421 | silentFillAllRegisters(); |
422 | |
423 | branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR, taken); |
424 | } else { |
425 | // FIXME: Add fast paths for twoCells, number etc. |
426 | |
427 | silentSpillAllRegisters(resultPayloadGPR); |
428 | callOperation(operationCompareStrictEq, resultPayloadGPR, arg1Regs, arg2Regs); |
429 | m_jit.exceptionCheck(); |
430 | silentFillAllRegisters(); |
431 | |
432 | branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR, taken); |
433 | } |
434 | |
435 | jump(notTaken); |
436 | } |
437 | |
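// Non-fused strict equality: same structure as the peephole version above, but produces a
// boolean result instead of branching.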
438 | void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert) |
439 | { |
440 | JSValueOperand arg1(this, node->child1()); |
441 | JSValueOperand arg2(this, node->child2()); |
442 | GPRReg arg1PayloadGPR = arg1.payloadGPR(); |
443 | GPRReg arg2PayloadGPR = arg2.payloadGPR(); |
444 | JSValueRegs arg1Regs = arg1.jsValueRegs(); |
445 | JSValueRegs arg2Regs = arg2.jsValueRegs(); |
446 | |
447 | GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord); |
448 | GPRReg resultPayloadGPR = resultPayload.gpr(); |
449 | |
450 | arg1.use(); |
451 | arg2.use(); |
452 | |
453 | if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) { |
454 | // see if we get lucky: if the arguments are cells and they reference the same |
455 | // cell, then they must be strictly equal. |
456 | // FIXME: this should flush registers instead of silent spill/fill. |
457 | JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1PayloadGPR, arg2PayloadGPR); |
458 | |
459 | m_jit.move(JITCompiler::TrustedImm32(!invert), resultPayloadGPR); |
460 | JITCompiler::Jump done = m_jit.jump(); |
461 | |
462 | notEqualCase.link(&m_jit); |
463 | |
464 | silentSpillAllRegisters(resultPayloadGPR); |
465 | callOperation(operationCompareStrictEqCell, resultPayloadGPR, arg1PayloadGPR, arg2PayloadGPR); |
466 | m_jit.exceptionCheck(); |
467 | silentFillAllRegisters(); |
468 | |
469 | m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR); |
470 | |
471 | done.link(&m_jit); |
472 | } else { |
473 | // FIXME: Add fast paths. |
474 | |
475 | silentSpillAllRegisters(resultPayloadGPR); |
476 | callOperation(operationCompareStrictEq, resultPayloadGPR, arg1Regs, arg2Regs); |
477 | silentFillAllRegisters(); |
478 | m_jit.exceptionCheck(); |
479 | |
480 | m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR); |
481 | } |
482 | |
483 | booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly); |
484 | } |
485 | |
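// CompareEqPtr: tests whether the operand is exactly the cell baked into the node and boxes
// the answer as a boolean.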
486 | void SpeculativeJIT::compileCompareEqPtr(Node* node) |
487 | { |
488 | JSValueOperand operand(this, node->child1()); |
489 | GPRTemporary result(this); |
490 | JSValueRegs regs = operand.jsValueRegs(); |
491 | GPRReg resultGPR = result.gpr(); |
492 | m_jit.boxBooleanPayload(false, resultGPR); |
493 | JITCompiler::JumpList notEqual = m_jit.branchIfNotEqual(regs, node->cellOperand()->value()); |
494 | m_jit.boxBooleanPayload(true, resultGPR); |
495 | notEqual.link(&m_jit); |
496 | blessedBooleanResult(resultGPR, node); |
497 | } |
498 | |
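// Emits every call flavor the 32-bit DFG supports: normal, construct, varargs, forwarded
// varargs, tail (real and emulated), direct, and eval. The node's op selects the
// CallLinkInfo type and how the callee frame is populated.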
499 | void SpeculativeJIT::emitCall(Node* node) |
500 | { |
501 | CallLinkInfo::CallType callType; |
502 | bool isVarargs = false; |
503 | bool isForwardVarargs = false; |
504 | bool isTail = false; |
505 | bool isDirect = false; |
506 | bool isEmulatedTail = false; |
507 | switch (node->op()) { |
508 | case Call: |
509 | case CallEval: |
510 | callType = CallLinkInfo::Call; |
511 | break; |
512 | case TailCall: |
513 | callType = CallLinkInfo::TailCall; |
514 | isTail = true; |
515 | break; |
516 | case TailCallInlinedCaller: |
517 | callType = CallLinkInfo::Call; |
518 | isEmulatedTail = true; |
519 | break; |
520 | case Construct: |
521 | callType = CallLinkInfo::Construct; |
522 | break; |
523 | case CallVarargs: |
524 | callType = CallLinkInfo::CallVarargs; |
525 | isVarargs = true; |
526 | break; |
527 | case TailCallVarargs: |
528 | callType = CallLinkInfo::TailCallVarargs; |
529 | isVarargs = true; |
530 | isTail = true; |
531 | break; |
532 | case TailCallVarargsInlinedCaller: |
533 | callType = CallLinkInfo::CallVarargs; |
534 | isVarargs = true; |
535 | isEmulatedTail = true; |
536 | break; |
537 | case ConstructVarargs: |
538 | callType = CallLinkInfo::ConstructVarargs; |
539 | isVarargs = true; |
540 | break; |
541 | case CallForwardVarargs: |
542 | callType = CallLinkInfo::CallVarargs; |
543 | isForwardVarargs = true; |
544 | break; |
545 | case TailCallForwardVarargs: |
546 | callType = CallLinkInfo::TailCallVarargs; |
547 | isTail = true; |
548 | isForwardVarargs = true; |
549 | break; |
550 | case TailCallForwardVarargsInlinedCaller: |
551 | callType = CallLinkInfo::CallVarargs; |
552 | isEmulatedTail = true; |
553 | isForwardVarargs = true; |
554 | break; |
555 | case ConstructForwardVarargs: |
556 | callType = CallLinkInfo::ConstructVarargs; |
557 | isForwardVarargs = true; |
558 | break; |
559 | case DirectCall: |
560 | callType = CallLinkInfo::DirectCall; |
561 | isDirect = true; |
562 | break; |
563 | case DirectConstruct: |
564 | callType = CallLinkInfo::DirectConstruct; |
565 | isDirect = true; |
566 | break; |
567 | case DirectTailCall: |
568 | callType = CallLinkInfo::DirectTailCall; |
569 | isTail = true; |
570 | isDirect = true; |
571 | break; |
572 | case DirectTailCallInlinedCaller: |
573 | callType = CallLinkInfo::DirectCall; |
574 | isEmulatedTail = true; |
575 | isDirect = true; |
576 | break; |
577 | default: |
DFG_CRASH(m_jit.graph(), node, "bad node type");
579 | break; |
580 | } |
581 | |
582 | Edge calleeEdge = m_jit.graph().child(node, 0); |
583 | GPRReg calleeTagGPR = InvalidGPRReg; |
584 | GPRReg calleePayloadGPR = InvalidGPRReg; |
585 | CallFrameShuffleData shuffleData; |
586 | |
587 | ExecutableBase* executable = nullptr; |
588 | FunctionExecutable* functionExecutable = nullptr; |
589 | if (isDirect) { |
590 | executable = node->castOperand<ExecutableBase*>(); |
591 | functionExecutable = jsDynamicCast<FunctionExecutable*>(*m_jit.vm(), executable); |
592 | } |
593 | |
594 | unsigned numPassedArgs = 0; |
595 | unsigned numAllocatedArgs = 0; |
596 | |
597 | // Gotta load the arguments somehow. Varargs is trickier. |
598 | if (isVarargs || isForwardVarargs) { |
599 | RELEASE_ASSERT(!isDirect); |
600 | CallVarargsData* data = node->callVarargsData(); |
601 | |
602 | int numUsedStackSlots = m_jit.graph().m_nextMachineLocal; |
603 | |
604 | if (isForwardVarargs) { |
605 | flushRegisters(); |
606 | if (node->child3()) |
607 | use(node->child3()); |
608 | |
609 | GPRReg scratchGPR1; |
610 | GPRReg scratchGPR2; |
611 | GPRReg scratchGPR3; |
612 | |
613 | scratchGPR1 = JITCompiler::selectScratchGPR(); |
614 | scratchGPR2 = JITCompiler::selectScratchGPR(scratchGPR1); |
615 | scratchGPR3 = JITCompiler::selectScratchGPR(scratchGPR1, scratchGPR2); |
616 | |
617 | m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR2); |
618 | JITCompiler::JumpList slowCase; |
619 | InlineCallFrame* inlineCallFrame; |
620 | if (node->child3()) |
621 | inlineCallFrame = node->child3()->origin.semantic.inlineCallFrame(); |
622 | else |
623 | inlineCallFrame = node->origin.semantic.inlineCallFrame(); |
624 | // emitSetupVarargsFrameFastCase modifies the stack pointer if it succeeds. |
625 | emitSetupVarargsFrameFastCase(*m_jit.vm(), m_jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase); |
626 | JITCompiler::Jump done = m_jit.jump(); |
627 | slowCase.link(&m_jit); |
628 | callOperation(operationThrowStackOverflowForVarargs); |
629 | m_jit.exceptionCheck(); |
630 | m_jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow); |
631 | done.link(&m_jit); |
632 | } else { |
633 | GPRReg argumentsPayloadGPR; |
634 | GPRReg argumentsTagGPR; |
635 | GPRReg scratchGPR1; |
636 | GPRReg scratchGPR2; |
637 | GPRReg scratchGPR3; |
638 | |
639 | auto loadArgumentsGPR = [&] (GPRReg reservedGPR) { |
640 | if (reservedGPR != InvalidGPRReg) |
641 | lock(reservedGPR); |
642 | JSValueOperand arguments(this, node->child3()); |
643 | argumentsTagGPR = arguments.tagGPR(); |
644 | argumentsPayloadGPR = arguments.payloadGPR(); |
645 | if (reservedGPR != InvalidGPRReg) |
646 | unlock(reservedGPR); |
647 | flushRegisters(); |
648 | |
649 | scratchGPR1 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, reservedGPR); |
650 | scratchGPR2 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, scratchGPR1, reservedGPR); |
651 | scratchGPR3 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, scratchGPR1, scratchGPR2, reservedGPR); |
652 | }; |
653 | |
654 | loadArgumentsGPR(InvalidGPRReg); |
655 | |
656 | DFG_ASSERT(m_jit.graph(), node, isFlushed()); |
657 | |
658 | // Right now, arguments is in argumentsTagGPR/argumentsPayloadGPR and the register file is |
659 | // flushed. |
660 | callOperation(operationSizeFrameForVarargs, GPRInfo::returnValueGPR, JSValueRegs(argumentsTagGPR, argumentsPayloadGPR), numUsedStackSlots, data->firstVarArgOffset); |
661 | m_jit.exceptionCheck(); |
662 | |
663 | // Now we have the argument count of the callee frame, but we've lost the arguments operand. |
664 | // Reconstruct the arguments operand while preserving the callee frame. |
665 | loadArgumentsGPR(GPRInfo::returnValueGPR); |
666 | m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR1); |
667 | emitSetVarargsFrame(m_jit, GPRInfo::returnValueGPR, false, scratchGPR1, scratchGPR1); |
668 | m_jit.addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 6 * sizeof(void*)))), scratchGPR1, JITCompiler::stackPointerRegister); |
669 | |
670 | callOperation(operationSetupVarargsFrame, GPRInfo::returnValueGPR, scratchGPR1, JSValueRegs(argumentsTagGPR, argumentsPayloadGPR), data->firstVarArgOffset, GPRInfo::returnValueGPR); |
671 | m_jit.exceptionCheck(); |
672 | m_jit.addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::returnValueGPR, JITCompiler::stackPointerRegister); |
673 | } |
674 | |
675 | DFG_ASSERT(m_jit.graph(), node, isFlushed()); |
676 | |
677 | // We don't need the arguments array anymore. |
678 | if (isVarargs) |
679 | use(node->child3()); |
680 | |
681 | // Now set up the "this" argument. |
682 | JSValueOperand thisArgument(this, node->child2()); |
683 | GPRReg thisArgumentTagGPR = thisArgument.tagGPR(); |
684 | GPRReg thisArgumentPayloadGPR = thisArgument.payloadGPR(); |
685 | thisArgument.use(); |
686 | |
687 | m_jit.store32(thisArgumentTagGPR, JITCompiler::calleeArgumentTagSlot(0)); |
688 | m_jit.store32(thisArgumentPayloadGPR, JITCompiler::calleeArgumentPayloadSlot(0)); |
689 | } else { |
// The call instruction's first child is either the function (normal call) or the
// receiver (method call). Subsequent children are the arguments.
692 | numPassedArgs = node->numChildren() - 1; |
693 | numAllocatedArgs = numPassedArgs; |
694 | |
695 | if (functionExecutable) { |
// Allocate more args if this would let us avoid arity checks. This is throttled by
// CallLinkInfo's limit. It's probably good to throttle it: if the callee wants a
// ginormous amount of argument space, then it's better for the callee to allocate it, so that
// when we make calls to other things, we don't waste space.
700 | unsigned desiredNumAllocatedArgs = static_cast<unsigned>(functionExecutable->parameterCount()) + 1; |
701 | if (desiredNumAllocatedArgs <= Options::maximumDirectCallStackSize()) { |
702 | numAllocatedArgs = std::max(numAllocatedArgs, desiredNumAllocatedArgs); |
703 | |
704 | // Whoever converts to DirectCall should do this adjustment. It's too late for us to |
705 | // do this adjustment now since we will have already emitted code that relied on the |
706 | // value of m_parameterSlots. |
707 | DFG_ASSERT( |
708 | m_jit.graph(), node, |
709 | Graph::parameterSlotsForArgCount(numAllocatedArgs) |
710 | <= m_jit.graph().m_parameterSlots); |
711 | } |
712 | } |
713 | |
714 | if (isTail) { |
715 | JSValueOperand callee(this, calleeEdge); |
716 | calleeTagGPR = callee.tagGPR(); |
717 | calleePayloadGPR = callee.payloadGPR(); |
718 | if (!isDirect) |
719 | use(calleeEdge); |
720 | |
721 | shuffleData.numLocals = m_jit.graph().frameRegisterCount(); |
722 | shuffleData.callee = ValueRecovery::inPair(calleeTagGPR, calleePayloadGPR); |
723 | shuffleData.args.resize(numAllocatedArgs); |
724 | shuffleData.numPassedArgs = numPassedArgs; |
725 | |
726 | for (unsigned i = 0; i < numPassedArgs; ++i) { |
727 | Edge argEdge = m_jit.graph().varArgChild(node, i + 1); |
728 | GenerationInfo& info = generationInfo(argEdge.node()); |
729 | if (!isDirect) |
730 | use(argEdge); |
731 | shuffleData.args[i] = info.recovery(argEdge->virtualRegister()); |
732 | } |
733 | |
734 | for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i) |
735 | shuffleData.args[i] = ValueRecovery::constant(jsUndefined()); |
736 | } else { |
737 | m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), m_jit.calleeFramePayloadSlot(CallFrameSlot::argumentCount)); |
738 | |
739 | for (unsigned i = 0; i < numPassedArgs; i++) { |
740 | Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i]; |
741 | JSValueOperand arg(this, argEdge); |
742 | GPRReg argTagGPR = arg.tagGPR(); |
743 | GPRReg argPayloadGPR = arg.payloadGPR(); |
744 | use(argEdge); |
745 | |
746 | m_jit.store32(argTagGPR, m_jit.calleeArgumentTagSlot(i)); |
747 | m_jit.store32(argPayloadGPR, m_jit.calleeArgumentPayloadSlot(i)); |
748 | } |
749 | |
750 | for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i) |
751 | m_jit.storeTrustedValue(jsUndefined(), JITCompiler::calleeArgumentSlot(i)); |
752 | } |
753 | } |
754 | |
755 | if (!isTail || isVarargs || isForwardVarargs) { |
756 | JSValueOperand callee(this, calleeEdge); |
757 | calleeTagGPR = callee.tagGPR(); |
758 | calleePayloadGPR = callee.payloadGPR(); |
759 | use(calleeEdge); |
760 | m_jit.store32(calleePayloadGPR, m_jit.calleeFramePayloadSlot(CallFrameSlot::callee)); |
761 | m_jit.store32(calleeTagGPR, m_jit.calleeFrameTagSlot(CallFrameSlot::callee)); |
762 | |
763 | if (!isTail) |
764 | flushRegisters(); |
765 | } |
766 | |
767 | JITCompiler::DataLabelPtr targetToCheck; |
768 | JITCompiler::JumpList slowPath; |
769 | |
770 | CodeOrigin staticOrigin = node->origin.semantic; |
771 | InlineCallFrame* staticInlineCallFrame = staticOrigin.inlineCallFrame(); |
772 | ASSERT(!isTail || !staticInlineCallFrame || !staticInlineCallFrame->getCallerSkippingTailCalls()); |
773 | ASSERT(!isEmulatedTail || (staticInlineCallFrame && staticInlineCallFrame->getCallerSkippingTailCalls())); |
774 | CodeOrigin dynamicOrigin = |
775 | isEmulatedTail ? *staticInlineCallFrame->getCallerSkippingTailCalls() : staticOrigin; |
776 | CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(dynamicOrigin, m_stream->size()); |
777 | |
778 | CallLinkInfo* info = m_jit.codeBlock()->addCallLinkInfo(); |
779 | info->setUpCall(callType, node->origin.semantic, calleePayloadGPR); |
780 | |
781 | auto setResultAndResetStack = [&] () { |
782 | JSValueRegsFlushedCallResult result(this); |
783 | JSValueRegs resultRegs = result.regs(); |
784 | |
785 | m_jit.setupResults(resultRegs); |
786 | |
787 | jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly); |
// After the calls are done, we need to reestablish our stack
// pointer. We rely on this for varargs calls, calls with arity
// mismatch (the call frame is slid) and tail calls.
791 | m_jit.addPtr(TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, JITCompiler::stackPointerRegister); |
792 | }; |
793 | |
794 | if (node->op() == CallEval) { |
// We want to call operationCallEval but we don't want to overwrite the parameter area in
// which we have created a prototypical eval call frame. This means that we have to
// subtract from the stack pointer to make room for the call. Lucky for us, at this point we have the whole
// register file to ourselves.
799 | |
800 | m_jit.emitStoreCallSiteIndex(callSite); |
801 | m_jit.addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), JITCompiler::stackPointerRegister, GPRInfo::regT0); |
802 | m_jit.storePtr(GPRInfo::callFrameRegister, JITCompiler::Address(GPRInfo::regT0, CallFrame::callerFrameOffset())); |
803 | |
804 | // Now we need to make room for: |
805 | // - The caller frame and PC of a call to operationCallEval. |
806 | // - Potentially two arguments on the stack. |
807 | unsigned requiredBytes = sizeof(CallerFrameAndPC) + sizeof(ExecState*) * 2; |
808 | requiredBytes = WTF::roundUpToMultipleOf(stackAlignmentBytes(), requiredBytes); |
809 | m_jit.subPtr(TrustedImm32(requiredBytes), JITCompiler::stackPointerRegister); |
810 | m_jit.setupArguments<decltype(operationCallEval)>(GPRInfo::regT0); |
811 | prepareForExternalCall(); |
812 | m_jit.appendCall(operationCallEval); |
813 | m_jit.exceptionCheck(); |
814 | JITCompiler::Jump done = m_jit.branchIfNotEmpty(GPRInfo::returnValueGPR2); |
815 | |
816 | // This is the part where we meant to make a normal call. Oops. |
817 | m_jit.addPtr(TrustedImm32(requiredBytes), JITCompiler::stackPointerRegister); |
818 | m_jit.load32(JITCompiler::calleeFrameSlot(CallFrameSlot::callee).withOffset(PayloadOffset), GPRInfo::regT0); |
819 | m_jit.load32(JITCompiler::calleeFrameSlot(CallFrameSlot::callee).withOffset(TagOffset), GPRInfo::regT1); |
820 | m_jit.emitDumbVirtualCall(*m_jit.vm(), info); |
821 | |
822 | done.link(&m_jit); |
823 | setResultAndResetStack(); |
824 | return; |
825 | } |
826 | |
827 | if (isDirect) { |
828 | info->setExecutableDuringCompilation(executable); |
829 | info->setMaxNumArguments(numAllocatedArgs); |
830 | |
831 | if (isTail) { |
832 | RELEASE_ASSERT(node->op() == DirectTailCall); |
833 | |
834 | JITCompiler::PatchableJump patchableJump = m_jit.patchableJump(); |
835 | JITCompiler::Label mainPath = m_jit.label(); |
836 | |
837 | m_jit.emitStoreCallSiteIndex(callSite); |
838 | |
839 | info->setFrameShuffleData(shuffleData); |
840 | CallFrameShuffler(m_jit, shuffleData).prepareForTailCall(); |
841 | |
842 | JITCompiler::Call call = m_jit.nearTailCall(); |
843 | |
844 | JITCompiler::Label slowPath = m_jit.label(); |
845 | patchableJump.m_jump.linkTo(slowPath, &m_jit); |
846 | |
847 | silentSpillAllRegisters(InvalidGPRReg); |
848 | callOperation(operationLinkDirectCall, info, calleePayloadGPR); |
849 | silentFillAllRegisters(); |
850 | m_jit.exceptionCheck(); |
851 | m_jit.jump().linkTo(mainPath, &m_jit); |
852 | |
853 | useChildren(node); |
854 | |
855 | m_jit.addJSDirectTailCall(patchableJump, call, slowPath, info); |
856 | return; |
857 | } |
858 | |
859 | JITCompiler::Label mainPath = m_jit.label(); |
860 | |
861 | m_jit.emitStoreCallSiteIndex(callSite); |
862 | |
863 | JITCompiler::Call call = m_jit.nearCall(); |
864 | JITCompiler::Jump done = m_jit.jump(); |
865 | |
866 | JITCompiler::Label slowPath = m_jit.label(); |
867 | if (isX86()) |
868 | m_jit.pop(JITCompiler::selectScratchGPR(calleePayloadGPR)); |
869 | |
870 | callOperation(operationLinkDirectCall, info, calleePayloadGPR); |
871 | m_jit.exceptionCheck(); |
872 | m_jit.jump().linkTo(mainPath, &m_jit); |
873 | |
874 | done.link(&m_jit); |
875 | |
876 | setResultAndResetStack(); |
877 | |
878 | m_jit.addJSDirectCall(call, slowPath, info); |
879 | return; |
880 | } |
881 | |
882 | m_jit.emitStoreCallSiteIndex(callSite); |
883 | |
884 | slowPath.append(m_jit.branchIfNotCell(JSValueRegs(calleeTagGPR, calleePayloadGPR))); |
885 | slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleePayloadGPR, targetToCheck)); |
886 | |
887 | if (isTail) { |
888 | if (node->op() == TailCall) { |
889 | info->setFrameShuffleData(shuffleData); |
890 | CallFrameShuffler(m_jit, shuffleData).prepareForTailCall(); |
891 | } else { |
892 | m_jit.emitRestoreCalleeSaves(); |
893 | m_jit.prepareForTailCallSlow(); |
894 | } |
895 | } |
896 | |
897 | JITCompiler::Call fastCall = isTail ? m_jit.nearTailCall() : m_jit.nearCall(); |
898 | |
899 | JITCompiler::Jump done = m_jit.jump(); |
900 | |
901 | slowPath.link(&m_jit); |
902 | |
903 | if (node->op() == TailCall) { |
904 | CallFrameShuffler callFrameShuffler(m_jit, shuffleData); |
905 | callFrameShuffler.setCalleeJSValueRegs(JSValueRegs( |
906 | GPRInfo::regT1, GPRInfo::regT0)); |
907 | callFrameShuffler.prepareForSlowPath(); |
908 | } else { |
909 | // Callee payload needs to be in regT0, tag in regT1 |
910 | if (calleeTagGPR == GPRInfo::regT0) { |
911 | if (calleePayloadGPR == GPRInfo::regT1) |
912 | m_jit.swap(GPRInfo::regT1, GPRInfo::regT0); |
913 | else { |
914 | m_jit.move(calleeTagGPR, GPRInfo::regT1); |
915 | m_jit.move(calleePayloadGPR, GPRInfo::regT0); |
916 | } |
917 | } else { |
918 | m_jit.move(calleePayloadGPR, GPRInfo::regT0); |
919 | m_jit.move(calleeTagGPR, GPRInfo::regT1); |
920 | } |
921 | |
922 | if (isTail) |
923 | m_jit.emitRestoreCalleeSaves(); |
924 | } |
925 | |
926 | m_jit.move(TrustedImmPtr(info), GPRInfo::regT2); |
927 | JITCompiler::Call slowCall = m_jit.nearCall(); |
928 | |
929 | done.link(&m_jit); |
930 | |
931 | if (isTail) |
932 | m_jit.abortWithReason(JITDidReturnFromTailCall); |
933 | else |
934 | setResultAndResetStack(); |
935 | |
936 | m_jit.addJSCall(fastCall, slowCall, targetToCheck, info); |
937 | } |
938 | |
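// Fills the edge as an unboxed Int32, emitting a speculation check when the value is not
// already proven to be an Int32. The strict template parameter does not change the emitted
// code in this 32-bit backend; both variants produce DataFormatInt32.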
939 | template<bool strict> |
940 | GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnFormat) |
941 | { |
942 | AbstractValue& value = m_state.forNode(edge); |
943 | SpeculatedType type = value.m_type; |
944 | ASSERT(edge.useKind() != KnownInt32Use || !(value.m_type & ~SpecInt32Only)); |
945 | |
946 | m_interpreter.filter(value, SpecInt32Only); |
947 | if (value.isClear()) { |
948 | terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); |
949 | returnFormat = DataFormatInt32; |
950 | return allocate(); |
951 | } |
952 | |
953 | VirtualRegister virtualRegister = edge->virtualRegister(); |
954 | GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); |
955 | |
956 | switch (info.registerFormat()) { |
957 | case DataFormatNone: { |
958 | if (edge->hasConstant()) { |
959 | ASSERT(edge->isInt32Constant()); |
960 | GPRReg gpr = allocate(); |
961 | m_jit.move(MacroAssembler::Imm32(edge->asInt32()), gpr); |
962 | m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); |
963 | info.fillInt32(*m_stream, gpr); |
964 | returnFormat = DataFormatInt32; |
965 | return gpr; |
966 | } |
967 | |
968 | DataFormat spillFormat = info.spillFormat(); |
969 | |
970 | ASSERT_UNUSED(spillFormat, (spillFormat & DataFormatJS) || spillFormat == DataFormatInt32); |
971 | |
972 | // If we know this was spilled as an integer we can fill without checking. |
973 | if (type & ~SpecInt32Only) |
974 | speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag))); |
975 | |
976 | GPRReg gpr = allocate(); |
977 | m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr); |
978 | m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); |
979 | info.fillInt32(*m_stream, gpr); |
980 | returnFormat = DataFormatInt32; |
981 | return gpr; |
982 | } |
983 | |
984 | case DataFormatJSInt32: |
985 | case DataFormatJS: { |
986 | // Check the value is an integer. |
987 | GPRReg tagGPR = info.tagGPR(); |
988 | GPRReg payloadGPR = info.payloadGPR(); |
989 | m_gprs.lock(tagGPR); |
990 | m_gprs.lock(payloadGPR); |
991 | if (type & ~SpecInt32Only) |
992 | speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branchIfNotInt32(tagGPR)); |
993 | m_gprs.unlock(tagGPR); |
994 | m_gprs.release(tagGPR); |
995 | m_gprs.release(payloadGPR); |
996 | m_gprs.retain(payloadGPR, virtualRegister, SpillOrderInteger); |
997 | info.fillInt32(*m_stream, payloadGPR); |
998 | // If !strict we're done, return. |
999 | returnFormat = DataFormatInt32; |
1000 | return payloadGPR; |
1001 | } |
1002 | |
1003 | case DataFormatInt32: { |
1004 | GPRReg gpr = info.gpr(); |
1005 | m_gprs.lock(gpr); |
1006 | returnFormat = DataFormatInt32; |
1007 | return gpr; |
1008 | } |
1009 | |
1010 | case DataFormatCell: |
1011 | case DataFormatBoolean: |
1012 | case DataFormatJSDouble: |
1013 | case DataFormatJSCell: |
1014 | case DataFormatJSBoolean: |
1015 | case DataFormatDouble: |
1016 | case DataFormatStorage: |
1017 | default: |
1018 | RELEASE_ASSERT_NOT_REACHED(); |
1019 | return InvalidGPRReg; |
1020 | } |
1021 | } |
1022 | |
1023 | GPRReg SpeculativeJIT::fillSpeculateInt32(Edge edge, DataFormat& returnFormat) |
1024 | { |
1025 | return fillSpeculateInt32Internal<false>(edge, returnFormat); |
1026 | } |
1027 | |
1028 | GPRReg SpeculativeJIT::fillSpeculateInt32Strict(Edge edge) |
1029 | { |
1030 | DataFormat mustBeDataFormatInt32; |
1031 | GPRReg result = fillSpeculateInt32Internal<true>(edge, mustBeDataFormatInt32); |
1032 | ASSERT(mustBeDataFormatInt32 == DataFormatInt32); |
1033 | return result; |
1034 | } |
1035 | |
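// Fills a double-typed edge into an FPR, loading it from a constant or its spill slot if it
// is not already live in a register.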
1036 | FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge) |
1037 | { |
1038 | ASSERT(isDouble(edge.useKind())); |
1039 | ASSERT(edge->hasDoubleResult()); |
1040 | VirtualRegister virtualRegister = edge->virtualRegister(); |
1041 | GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); |
1042 | |
1043 | if (info.registerFormat() == DataFormatNone) { |
1044 | |
1045 | if (edge->hasConstant()) { |
1046 | RELEASE_ASSERT(edge->isNumberConstant()); |
1047 | FPRReg fpr = fprAllocate(); |
1048 | m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(edge.node())), fpr); |
1049 | m_fprs.retain(fpr, virtualRegister, SpillOrderConstant); |
1050 | info.fillDouble(*m_stream, fpr); |
1051 | return fpr; |
1052 | } |
1053 | |
1054 | RELEASE_ASSERT(info.spillFormat() == DataFormatDouble); |
1055 | FPRReg fpr = fprAllocate(); |
1056 | m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); |
1057 | m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled); |
1058 | info.fillDouble(*m_stream, fpr); |
1059 | return fpr; |
1060 | } |
1061 | |
1062 | RELEASE_ASSERT(info.registerFormat() == DataFormatDouble); |
1063 | FPRReg fpr = info.fpr(); |
1064 | m_fprs.lock(fpr); |
1065 | return fpr; |
1066 | } |
1067 | |
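// Fills the edge as an unboxed cell payload, emitting a cell-tag speculation check when the
// value is not already proven to be a cell.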
1068 | GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge) |
1069 | { |
1070 | AbstractValue& value = m_state.forNode(edge); |
1071 | SpeculatedType type = value.m_type; |
1072 | ASSERT((edge.useKind() != KnownCellUse && edge.useKind() != KnownStringUse) || !(value.m_type & ~SpecCell)); |
1073 | |
1074 | m_interpreter.filter(value, SpecCell); |
1075 | if (value.isClear()) { |
1076 | terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); |
1077 | return allocate(); |
1078 | } |
1079 | |
1080 | VirtualRegister virtualRegister = edge->virtualRegister(); |
1081 | GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); |
1082 | |
1083 | switch (info.registerFormat()) { |
1084 | case DataFormatNone: { |
1085 | if (edge->hasConstant()) { |
1086 | GPRReg gpr = allocate(); |
1087 | m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); |
1088 | m_jit.move(TrustedImmPtr(edge->constant()), gpr); |
1089 | info.fillCell(*m_stream, gpr); |
1090 | return gpr; |
1091 | } |
1092 | |
1093 | ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatCell); |
1094 | if (type & ~SpecCell) { |
1095 | speculationCheck( |
1096 | BadType, |
1097 | JSValueSource(JITCompiler::addressFor(virtualRegister)), |
1098 | edge, |
1099 | m_jit.branch32( |
1100 | MacroAssembler::NotEqual, |
1101 | JITCompiler::tagFor(virtualRegister), |
1102 | TrustedImm32(JSValue::CellTag))); |
1103 | } |
1104 | GPRReg gpr = allocate(); |
1105 | m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr); |
1106 | m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); |
1107 | info.fillCell(*m_stream, gpr); |
1108 | return gpr; |
1109 | } |
1110 | |
1111 | case DataFormatCell: { |
1112 | GPRReg gpr = info.gpr(); |
1113 | m_gprs.lock(gpr); |
1114 | return gpr; |
1115 | } |
1116 | |
1117 | case DataFormatJSCell: |
1118 | case DataFormatJS: { |
1119 | GPRReg tagGPR = info.tagGPR(); |
1120 | GPRReg payloadGPR = info.payloadGPR(); |
1121 | m_gprs.lock(tagGPR); |
1122 | m_gprs.lock(payloadGPR); |
1123 | if (type & ~SpecCell) { |
1124 | speculationCheck( |
1125 | BadType, JSValueRegs(tagGPR, payloadGPR), edge, |
1126 | m_jit.branchIfNotCell(info.jsValueRegs())); |
1127 | } |
1128 | m_gprs.unlock(tagGPR); |
1129 | m_gprs.release(tagGPR); |
1130 | m_gprs.release(payloadGPR); |
1131 | m_gprs.retain(payloadGPR, virtualRegister, SpillOrderCell); |
1132 | info.fillCell(*m_stream, payloadGPR); |
1133 | return payloadGPR; |
1134 | } |
1135 | |
1136 | case DataFormatJSInt32: |
1137 | case DataFormatInt32: |
1138 | case DataFormatJSDouble: |
1139 | case DataFormatJSBoolean: |
1140 | case DataFormatBoolean: |
1141 | case DataFormatDouble: |
1142 | case DataFormatStorage: |
1143 | RELEASE_ASSERT_NOT_REACHED(); |
1144 | |
1145 | default: |
1146 | RELEASE_ASSERT_NOT_REACHED(); |
1147 | return InvalidGPRReg; |
1148 | } |
1149 | } |
1150 | |
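// Fills the edge as an unboxed boolean payload, emitting a boolean-tag speculation check
// when needed.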
1151 | GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge) |
1152 | { |
1153 | AbstractValue& value = m_state.forNode(edge); |
1154 | SpeculatedType type = value.m_type; |
1155 | ASSERT(edge.useKind() != KnownBooleanUse || !(value.m_type & ~SpecBoolean)); |
1156 | |
1157 | m_interpreter.filter(value, SpecBoolean); |
1158 | if (value.isClear()) { |
1159 | terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); |
1160 | return allocate(); |
1161 | } |
1162 | |
1163 | VirtualRegister virtualRegister = edge->virtualRegister(); |
1164 | GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister); |
1165 | |
1166 | switch (info.registerFormat()) { |
1167 | case DataFormatNone: { |
1168 | if (edge->hasConstant()) { |
1169 | JSValue jsValue = edge->asJSValue(); |
1170 | GPRReg gpr = allocate(); |
1171 | m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); |
1172 | m_jit.move(MacroAssembler::TrustedImm32(jsValue.asBoolean()), gpr); |
1173 | info.fillBoolean(*m_stream, gpr); |
1174 | return gpr; |
1175 | } |
1176 | |
1177 | ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatBoolean); |
1178 | |
1179 | if (type & ~SpecBoolean) |
1180 | speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag))); |
1181 | |
1182 | GPRReg gpr = allocate(); |
1183 | m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr); |
1184 | m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); |
1185 | info.fillBoolean(*m_stream, gpr); |
1186 | return gpr; |
1187 | } |
1188 | |
1189 | case DataFormatBoolean: { |
1190 | GPRReg gpr = info.gpr(); |
1191 | m_gprs.lock(gpr); |
1192 | return gpr; |
1193 | } |
1194 | |
1195 | case DataFormatJSBoolean: |
1196 | case DataFormatJS: { |
1197 | GPRReg tagGPR = info.tagGPR(); |
1198 | GPRReg payloadGPR = info.payloadGPR(); |
1199 | m_gprs.lock(tagGPR); |
1200 | m_gprs.lock(payloadGPR); |
1201 | if (type & ~SpecBoolean) |
1202 | speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branchIfNotBoolean(tagGPR, InvalidGPRReg)); |
1203 | |
1204 | m_gprs.unlock(tagGPR); |
1205 | m_gprs.release(tagGPR); |
1206 | m_gprs.release(payloadGPR); |
1207 | m_gprs.retain(payloadGPR, virtualRegister, SpillOrderBoolean); |
1208 | info.fillBoolean(*m_stream, payloadGPR); |
1209 | return payloadGPR; |
1210 | } |
1211 | |
1212 | case DataFormatJSInt32: |
1213 | case DataFormatInt32: |
1214 | case DataFormatJSDouble: |
1215 | case DataFormatJSCell: |
1216 | case DataFormatCell: |
1217 | case DataFormatDouble: |
1218 | case DataFormatStorage: |
1219 | RELEASE_ASSERT_NOT_REACHED(); |
1220 | |
1221 | default: |
1222 | RELEASE_ASSERT_NOT_REACHED(); |
1223 | return InvalidGPRReg; |
1224 | } |
1225 | } |
1226 | |
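// Strict equality where the left child is speculated to be an object: a non-cell right-hand
// side can never be equal, and for cells pointer comparison suffices.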
1227 | void SpeculativeJIT::compileObjectStrictEquality(Edge objectChild, Edge otherChild) |
1228 | { |
1229 | SpeculateCellOperand op1(this, objectChild); |
1230 | JSValueOperand op2(this, otherChild); |
1231 | |
1232 | GPRReg op1GPR = op1.gpr(); |
1233 | GPRReg op2GPR = op2.payloadGPR(); |
1234 | |
1235 | DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); |
1236 | |
1237 | GPRTemporary resultPayload(this, Reuse, op1); |
1238 | GPRReg resultPayloadGPR = resultPayload.gpr(); |
1239 | |
1240 | MacroAssembler::Jump op2CellJump = m_jit.branchIfCell(op2.jsValueRegs()); |
1241 | |
1242 | m_jit.move(TrustedImm32(0), resultPayloadGPR); |
1243 | MacroAssembler::Jump op2NotCellJump = m_jit.jump(); |
1244 | |
// At this point we know that we can perform a straightforward equality comparison on pointer
// values because we are doing strict equality.
1247 | op2CellJump.link(&m_jit); |
1248 | m_jit.compare32(MacroAssembler::Equal, op1GPR, op2GPR, resultPayloadGPR); |
1249 | |
1250 | op2NotCellJump.link(&m_jit); |
1251 | booleanResult(resultPayloadGPR, m_currentNode); |
1252 | } |
1253 | |
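// Branch-fused variant of compileObjectStrictEquality.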
1254 | void SpeculativeJIT::compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode) |
1255 | { |
1256 | BasicBlock* taken = branchNode->branchData()->taken.block; |
1257 | BasicBlock* notTaken = branchNode->branchData()->notTaken.block; |
1258 | |
1259 | SpeculateCellOperand op1(this, objectChild); |
1260 | JSValueOperand op2(this, otherChild); |
1261 | |
1262 | GPRReg op1GPR = op1.gpr(); |
1263 | GPRReg op2GPR = op2.payloadGPR(); |
1264 | |
1265 | DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); |
1266 | |
1267 | branch32(MacroAssembler::NotEqual, op2.tagGPR(), TrustedImm32(JSValue::CellTag), notTaken); |
1268 | |
1269 | if (taken == nextBlock()) { |
1270 | branch32(MacroAssembler::NotEqual, op1GPR, op2GPR, notTaken); |
1271 | jump(taken); |
1272 | } else { |
1273 | branch32(MacroAssembler::Equal, op1GPR, op2GPR, taken); |
1274 | jump(notTaken); |
1275 | } |
1276 | } |
1277 | |
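// Loose equality between a speculated object and a value speculated to be either an object
// or null/undefined. When the masquerades-as-undefined watchpoint is no longer valid, this
// speculates that neither side is a MasqueradesAsUndefined object.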
1278 | void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild) |
1279 | { |
1280 | SpeculateCellOperand op1(this, leftChild); |
1281 | JSValueOperand op2(this, rightChild, ManualOperandSpeculation); |
1282 | GPRTemporary result(this); |
1283 | |
1284 | GPRReg op1GPR = op1.gpr(); |
1285 | GPRReg op2TagGPR = op2.tagGPR(); |
1286 | GPRReg op2PayloadGPR = op2.payloadGPR(); |
1287 | GPRReg resultGPR = result.gpr(); |
1288 | |
1289 | bool masqueradesAsUndefinedWatchpointValid = |
1290 | masqueradesAsUndefinedWatchpointIsStillValid(); |
1291 | |
1292 | if (masqueradesAsUndefinedWatchpointValid) { |
1293 | DFG_TYPE_CHECK( |
1294 | JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); |
1295 | } else { |
1296 | DFG_TYPE_CHECK( |
1297 | JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); |
1298 | speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild, |
1299 | m_jit.branchTest8( |
1300 | MacroAssembler::NonZero, |
1301 | MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), |
1302 | MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); |
1303 | } |
1304 | |
1305 | |
1306 | // It seems that most of the time when programs do a == b where b may be either null/undefined |
1307 | // or an object, b is usually an object. Balance the branches to make that case fast. |
1308 | MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(op2.jsValueRegs()); |
1309 | |
1310 | // We know that within this branch, rightChild must be a cell. |
1311 | if (masqueradesAsUndefinedWatchpointValid) { |
1312 | DFG_TYPE_CHECK( |
1313 | JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2PayloadGPR)); |
1314 | } else { |
1315 | DFG_TYPE_CHECK( |
1316 | JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2PayloadGPR)); |
1317 | speculationCheck(BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, |
1318 | m_jit.branchTest8( |
1319 | MacroAssembler::NonZero, |
1320 | MacroAssembler::Address(op2PayloadGPR, JSCell::typeInfoFlagsOffset()), |
1321 | MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); |
1322 | } |
1323 | |
// At this point we know that we can perform a straightforward equality comparison on pointer
// values because both left and right are pointers to objects that have no special equality
// protocols.
1327 | MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2PayloadGPR); |
1328 | MacroAssembler::Jump trueCase = m_jit.jump(); |
1329 | |
1330 | rightNotCell.link(&m_jit); |
1331 | |
1332 | // We know that within this branch, rightChild must not be a cell. Check if that is enough to |
1333 | // prove that it is either null or undefined. |
1334 | if (needsTypeCheck(rightChild, SpecCell | SpecOther)) { |
1335 | m_jit.or32(TrustedImm32(1), op2TagGPR, resultGPR); |
1336 | |
1337 | typeCheck( |
1338 | JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, SpecCell | SpecOther, |
1339 | m_jit.branch32( |
1340 | MacroAssembler::NotEqual, resultGPR, |
1341 | MacroAssembler::TrustedImm32(JSValue::NullTag))); |
1342 | } |
1343 | |
1344 | falseCase.link(&m_jit); |
1345 | m_jit.move(TrustedImm32(0), resultGPR); |
1346 | MacroAssembler::Jump done = m_jit.jump(); |
1347 | trueCase.link(&m_jit); |
1348 | m_jit.move(TrustedImm32(1), resultGPR); |
1349 | done.link(&m_jit); |
1350 | |
1351 | booleanResult(resultGPR, m_currentNode); |
1352 | } |
1353 | |
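// Branch-fused variant of compileObjectToObjectOrOtherEquality.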
1354 | void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode) |
1355 | { |
1356 | BasicBlock* taken = branchNode->branchData()->taken.block; |
1357 | BasicBlock* notTaken = branchNode->branchData()->notTaken.block; |
1358 | |
1359 | SpeculateCellOperand op1(this, leftChild); |
1360 | JSValueOperand op2(this, rightChild, ManualOperandSpeculation); |
1361 | GPRTemporary result(this); |
1362 | |
1363 | GPRReg op1GPR = op1.gpr(); |
1364 | GPRReg op2TagGPR = op2.tagGPR(); |
1365 | GPRReg op2PayloadGPR = op2.payloadGPR(); |
1366 | GPRReg resultGPR = result.gpr(); |
1367 | |
1368 | bool masqueradesAsUndefinedWatchpointValid = |
1369 | masqueradesAsUndefinedWatchpointIsStillValid(); |
1370 | |
1371 | if (masqueradesAsUndefinedWatchpointValid) { |
1372 | DFG_TYPE_CHECK( |
1373 | JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); |
1374 | } else { |
1375 | DFG_TYPE_CHECK( |
1376 | JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR)); |
1377 | speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild, |
1378 | m_jit.branchTest8( |
1379 | MacroAssembler::NonZero, |
1380 | MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), |
1381 | MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); |
1382 | } |
1383 | |
1384 | // When a program does a == b and b may be either null/undefined or an object, b is usually |
1385 | // an object. Balance the branches to make that common case fast. |
1386 | MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(op2.jsValueRegs()); |
1387 | |
1388 | // We know that within this branch, rightChild must be a cell. |
1389 | if (masqueradesAsUndefinedWatchpointValid) { |
1390 | DFG_TYPE_CHECK( |
1391 | JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, |
1392 | m_jit.branchIfNotObject(op2PayloadGPR)); |
1393 | } else { |
1394 | DFG_TYPE_CHECK( |
1395 | JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, |
1396 | m_jit.branchIfNotObject(op2PayloadGPR)); |
1397 | speculationCheck(BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, |
1398 | m_jit.branchTest8( |
1399 | MacroAssembler::NonZero, |
1400 | MacroAssembler::Address(op2PayloadGPR, JSCell::typeInfoFlagsOffset()), |
1401 | MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); |
1402 | } |
1403 | |
1404 | // At this point we know that we can perform a straight-forward equality comparison on pointer |
1405 | // values because both left and right are pointers to objects that have no special equality |
1406 | // protocols. |
1407 | branch32(MacroAssembler::Equal, op1GPR, op2PayloadGPR, taken); |
1408 | |
1409 | // We know that within this branch, rightChild must not be a cell. Check if that is enough to |
1410 | // prove that it is either null or undefined. |
1411 | if (!needsTypeCheck(rightChild, SpecCell | SpecOther)) |
1412 | rightNotCell.link(&m_jit); |
1413 | else { |
1414 | jump(notTaken, ForceJump); |
1415 | |
1416 | rightNotCell.link(&m_jit); |
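// As above: UndefinedTag | 1 == NullTag, so one compare against NullTag covers both null and undefined.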
1417 | m_jit.or32(TrustedImm32(1), op2TagGPR, resultGPR); |
1418 | |
1419 | typeCheck( |
1420 | JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, SpecCell | SpecOther, |
1421 | m_jit.branch32( |
1422 | MacroAssembler::NotEqual, resultGPR, |
1423 | MacroAssembler::TrustedImm32(JSValue::NullTag))); |
1424 | } |
1425 | |
1426 | jump(notTaken); |
1427 | } |
1428 | |
1429 | void SpeculativeJIT::compileSymbolUntypedEquality(Node* node, Edge symbolEdge, Edge untypedEdge) |
1430 | { |
1431 | SpeculateCellOperand symbol(this, symbolEdge); |
1432 | JSValueOperand untyped(this, untypedEdge); |
1433 | |
1434 | GPRReg symbolGPR = symbol.gpr(); |
1435 | GPRReg untypedGPR = untyped.payloadGPR(); |
1436 | |
1437 | speculateSymbol(symbolEdge, symbolGPR); |
1438 | |
1439 | GPRTemporary resultPayload(this, Reuse, symbol); |
1440 | GPRReg resultPayloadGPR = resultPayload.gpr(); |
1441 | |
1442 | MacroAssembler::Jump untypedCellJump = m_jit.branchIfCell(untyped.jsValueRegs()); |
1443 | |
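// If the untyped operand is not a cell it cannot equal a symbol, so the result is false.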
1444 | m_jit.move(TrustedImm32(0), resultPayloadGPR); |
1445 | MacroAssembler::Jump untypedNotCellJump = m_jit.jump(); |
1446 | |
1447 | // At this point we know that we can perform a straight-forward equality comparison on pointer |
1448 | // values because we are doing strict equality. |
1449 | untypedCellJump.link(&m_jit); |
1450 | m_jit.compare32(MacroAssembler::Equal, symbolGPR, untypedGPR, resultPayloadGPR); |
1451 | |
1452 | untypedNotCellJump.link(&m_jit); |
1453 | booleanResult(resultPayloadGPR, node); |
1454 | } |
1455 | |
1456 | void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse) |
1457 | { |
1458 | JSValueOperand value(this, nodeUse, ManualOperandSpeculation); |
1459 | GPRTemporary resultPayload(this); |
1460 | GPRReg valueTagGPR = value.tagGPR(); |
1461 | GPRReg valuePayloadGPR = value.payloadGPR(); |
1462 | GPRReg resultPayloadGPR = resultPayload.gpr(); |
1463 | GPRTemporary structure; |
1464 | GPRReg structureGPR = InvalidGPRReg; |
1465 | |
1466 | bool masqueradesAsUndefinedWatchpointValid = |
1467 | masqueradesAsUndefinedWatchpointIsStillValid(); |
1468 | |
1469 | if (!masqueradesAsUndefinedWatchpointValid) { |
1470 | // The masquerades as undefined case will use the structure register, so allocate it here. |
1471 | // Do this at the top of the function to avoid branching around a register allocation. |
1472 | GPRTemporary realStructure(this); |
1473 | structure.adopt(realStructure); |
1474 | structureGPR = structure.gpr(); |
1475 | } |
1476 | |
1477 | MacroAssembler::Jump notCell = m_jit.branchIfNotCell(value.jsValueRegs()); |
1478 | if (masqueradesAsUndefinedWatchpointValid) { |
1479 | DFG_TYPE_CHECK( |
1480 | JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject, |
1481 | m_jit.branchIfNotObject(valuePayloadGPR)); |
1482 | } else { |
1483 | DFG_TYPE_CHECK( |
1484 | JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject, |
1485 | m_jit.branchIfNotObject(valuePayloadGPR)); |
1486 | |
1487 | MacroAssembler::Jump isNotMasqueradesAsUndefined = |
1488 | m_jit.branchTest8( |
1489 | MacroAssembler::Zero, |
1490 | MacroAssembler::Address(valuePayloadGPR, JSCell::typeInfoFlagsOffset()), |
1491 | MacroAssembler::TrustedImm32(MasqueradesAsUndefined)); |
1492 | |
1493 | m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureIDOffset()), structureGPR); |
1494 | speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, |
1495 | m_jit.branchPtr( |
1496 | MacroAssembler::Equal, |
1497 | MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()), |
1498 | TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)))); |
1499 | |
1500 | isNotMasqueradesAsUndefined.link(&m_jit); |
1501 | } |
1502 | m_jit.move(TrustedImm32(0), resultPayloadGPR); |
1503 | MacroAssembler::Jump done = m_jit.jump(); |
1504 | |
1505 | notCell.link(&m_jit); |
1506 | |
1507 | COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag); |
1508 | if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) { |
1509 | m_jit.or32(TrustedImm32(1), valueTagGPR, resultPayloadGPR); |
1510 | typeCheck( |
1511 | JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, SpecCell | SpecOther, |
1512 | m_jit.branch32( |
1513 | MacroAssembler::NotEqual, |
1514 | resultPayloadGPR, |
1515 | TrustedImm32(JSValue::NullTag))); |
1516 | } |
1517 | m_jit.move(TrustedImm32(1), resultPayloadGPR); |
1518 | |
1519 | done.link(&m_jit); |
1520 | |
1521 | booleanResult(resultPayloadGPR, m_currentNode); |
1522 | } |
1523 | |
1524 | void SpeculativeJIT::compileLogicalNot(Node* node) |
1525 | { |
1526 | switch (node->child1().useKind()) { |
1527 | case BooleanUse: |
1528 | case KnownBooleanUse: { |
1529 | SpeculateBooleanOperand value(this, node->child1()); |
1530 | GPRTemporary result(this, Reuse, value); |
1531 | m_jit.xor32(TrustedImm32(1), value.gpr(), result.gpr()); |
1532 | booleanResult(result.gpr(), node); |
1533 | return; |
1534 | } |
1535 | |
1536 | case ObjectOrOtherUse: { |
1537 | compileObjectOrOtherLogicalNot(node->child1()); |
1538 | return; |
1539 | } |
1540 | |
1541 | case Int32Use: { |
1542 | SpeculateInt32Operand value(this, node->child1()); |
1543 | GPRTemporary resultPayload(this, Reuse, value); |
1544 | m_jit.compare32(MacroAssembler::Equal, value.gpr(), MacroAssembler::TrustedImm32(0), resultPayload.gpr()); |
1545 | booleanResult(resultPayload.gpr(), node); |
1546 | return; |
1547 | } |
1548 | |
1549 | case DoubleRepUse: { |
1550 | SpeculateDoubleOperand value(this, node->child1()); |
1551 | FPRTemporary scratch(this); |
1552 | GPRTemporary resultPayload(this); |
1553 | m_jit.move(TrustedImm32(0), resultPayload.gpr()); |
1554 | MacroAssembler::Jump nonZero = m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr()); |
1555 | m_jit.move(TrustedImm32(1), resultPayload.gpr()); |
1556 | nonZero.link(&m_jit); |
1557 | booleanResult(resultPayload.gpr(), node); |
1558 | return; |
1559 | } |
1560 | |
1561 | case UntypedUse: { |
1562 | JSValueOperand arg1(this, node->child1()); |
1563 | GPRTemporary result(this); |
1564 | GPRTemporary temp(this); |
1565 | FPRTemporary valueFPR(this); |
1566 | FPRTemporary tempFPR(this); |
1567 | |
1568 | GPRReg resultGPR = result.gpr(); |
1569 | |
1570 | bool shouldCheckMasqueradesAsUndefined = !masqueradesAsUndefinedWatchpointIsStillValid(); |
1571 | JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); |
1572 | bool negateResult = true; |
1573 | m_jit.emitConvertValueToBoolean(*m_jit.vm(), arg1.jsValueRegs(), resultGPR, temp.gpr(), valueFPR.fpr(), tempFPR.fpr(), shouldCheckMasqueradesAsUndefined, globalObject, negateResult); |
1574 | booleanResult(resultGPR, node); |
1575 | return; |
1576 | } |
1577 | case StringUse: |
1578 | return compileStringZeroLength(node); |
1579 | |
1580 | case StringOrOtherUse: |
1581 | return compileLogicalNotStringOrOther(node); |
1582 | |
1583 | default: |
1584 | RELEASE_ASSERT_NOT_REACHED(); |
1585 | break; |
1586 | } |
1587 | } |
1588 | |
1589 | void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken) |
1590 | { |
1591 | JSValueOperand value(this, nodeUse, ManualOperandSpeculation); |
1592 | GPRTemporary scratch(this); |
1593 | GPRReg valueTagGPR = value.tagGPR(); |
1594 | GPRReg valuePayloadGPR = value.payloadGPR(); |
1595 | GPRReg scratchGPR = scratch.gpr(); |
1596 | |
1597 | MacroAssembler::Jump notCell = m_jit.branchIfNotCell(value.jsValueRegs()); |
1598 | if (masqueradesAsUndefinedWatchpointIsStillValid()) { |
1599 | DFG_TYPE_CHECK( |
1600 | JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject, |
1601 | m_jit.branchIfNotObject(valuePayloadGPR)); |
1602 | } else { |
1603 | DFG_TYPE_CHECK( |
1604 | JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject, |
1605 | m_jit.branchIfNotObject(valuePayloadGPR)); |
1606 | |
1607 | JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8( |
1608 | JITCompiler::Zero, |
1609 | MacroAssembler::Address(valuePayloadGPR, JSCell::typeInfoFlagsOffset()), |
1610 | TrustedImm32(MasqueradesAsUndefined)); |
1611 | |
1612 | m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureIDOffset()), scratchGPR); |
1613 | speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, |
1614 | m_jit.branchPtr( |
1615 | MacroAssembler::Equal, |
1616 | MacroAssembler::Address(scratchGPR, Structure::globalObjectOffset()), |
1617 | TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)))); |
1618 | |
1619 | isNotMasqueradesAsUndefined.link(&m_jit); |
1620 | } |
1621 | jump(taken, ForceJump); |
1622 | |
1623 | notCell.link(&m_jit); |
1624 | |
1625 | COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag); |
1626 | if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) { |
1627 | m_jit.or32(TrustedImm32(1), valueTagGPR, scratchGPR); |
1628 | typeCheck( |
1629 | JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, SpecCell | SpecOther, |
1630 | m_jit.branch32(MacroAssembler::NotEqual, scratchGPR, TrustedImm32(JSValue::NullTag))); |
1631 | } |
1632 | |
1633 | jump(notTaken); |
1634 | |
1635 | noResult(m_currentNode); |
1636 | } |
1637 | |
1638 | void SpeculativeJIT::emitBranch(Node* node) |
1639 | { |
1640 | BasicBlock* taken = node->branchData()->taken.block; |
1641 | BasicBlock* notTaken = node->branchData()->notTaken.block; |
1642 | |
1643 | switch (node->child1().useKind()) { |
1644 | case BooleanUse: |
1645 | case KnownBooleanUse: { |
1646 | SpeculateBooleanOperand value(this, node->child1()); |
1647 | MacroAssembler::ResultCondition condition = MacroAssembler::NonZero; |
1648 | |
1649 | if (taken == nextBlock()) { |
1650 | condition = MacroAssembler::Zero; |
1651 | BasicBlock* tmp = taken; |
1652 | taken = notTaken; |
1653 | notTaken = tmp; |
1654 | } |
1655 | |
1656 | branchTest32(condition, value.gpr(), TrustedImm32(1), taken); |
1657 | jump(notTaken); |
1658 | |
1659 | noResult(node); |
1660 | return; |
1661 | } |
1662 | |
1663 | case ObjectOrOtherUse: { |
1664 | emitObjectOrOtherBranch(node->child1(), taken, notTaken); |
1665 | return; |
1666 | } |
1667 | |
1668 | case StringUse: { |
1669 | emitStringBranch(node->child1(), taken, notTaken); |
1670 | return; |
1671 | } |
1672 | |
1673 | case StringOrOtherUse: { |
1674 | emitStringOrOtherBranch(node->child1(), taken, notTaken); |
1675 | return; |
1676 | } |
1677 | |
1678 | case DoubleRepUse: |
1679 | case Int32Use: { |
1680 | if (node->child1().useKind() == Int32Use) { |
1681 | bool invert = false; |
1682 | |
1683 | if (taken == nextBlock()) { |
1684 | invert = true; |
1685 | BasicBlock* tmp = taken; |
1686 | taken = notTaken; |
1687 | notTaken = tmp; |
1688 | } |
1689 | |
1690 | SpeculateInt32Operand value(this, node->child1()); |
1691 | branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr(), taken); |
1692 | } else { |
1693 | SpeculateDoubleOperand value(this, node->child1()); |
1694 | FPRTemporary scratch(this); |
1695 | branchDoubleNonZero(value.fpr(), scratch.fpr(), taken); |
1696 | } |
1697 | |
1698 | jump(notTaken); |
1699 | |
1700 | noResult(node); |
1701 | return; |
1702 | } |
1703 | |
1704 | case UntypedUse: { |
1705 | JSValueOperand value(this, node->child1()); |
1706 | FPRTemporary valueFPR(this); |
1707 | FPRTemporary tempFPR(this); |
1708 | GPRTemporary result(this); |
1709 | GPRTemporary temp(this); |
1710 | |
1711 | JSValueRegs valueRegs = value.jsValueRegs(); |
1712 | GPRReg resultGPR = result.gpr(); |
1713 | |
1714 | use(node->child1()); |
1715 | |
1716 | bool shouldCheckMasqueradesAsUndefined = !masqueradesAsUndefinedWatchpointIsStillValid(); |
1717 | JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); |
1718 | auto falsey = m_jit.branchIfFalsey(*m_jit.vm(), valueRegs, resultGPR, temp.gpr(), valueFPR.fpr(), tempFPR.fpr(), shouldCheckMasqueradesAsUndefined, globalObject); |
1719 | addBranch(falsey, notTaken); |
1720 | jump(taken, ForceJump); |
1721 | |
1722 | noResult(node, UseChildrenCalledExplicitly); |
1723 | return; |
1724 | } |
1725 | |
1726 | default: |
1727 | RELEASE_ASSERT_NOT_REACHED(); |
1728 | break; |
1729 | } |
1730 | } |
1731 | |
1732 | template<typename BaseOperandType, typename PropertyOperandType, typename ValueOperandType, typename TagType> |
1733 | void SpeculativeJIT::compileContiguousPutByVal(Node* node, BaseOperandType& base, PropertyOperandType& property, ValueOperandType& value, GPRReg valuePayloadReg, TagType valueTag) |
1734 | { |
1735 | Edge child4 = m_jit.graph().varArgChild(node, 3); |
1736 | |
1737 | ArrayMode arrayMode = node->arrayMode(); |
1738 | |
1739 | GPRReg baseReg = base.gpr(); |
1740 | GPRReg propertyReg = property.gpr(); |
1741 | |
1742 | StorageOperand storage(this, child4); |
1743 | GPRReg storageReg = storage.gpr(); |
1744 | |
1745 | if (node->op() == PutByValAlias) { |
1746 | // Store the value to the array. |
1747 | GPRReg propertyReg = property.gpr(); |
1748 | m_jit.store32(valueTag, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); |
1749 | m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); |
1750 | |
1751 | noResult(node); |
1752 | return; |
1753 | } |
1754 | |
1755 | MacroAssembler::Jump slowCase; |
1756 | |
1757 | if (arrayMode.isInBounds()) { |
1758 | speculationCheck( |
1759 | OutOfBounds, JSValueRegs(), 0, |
1760 | m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); |
1761 | } else { |
1762 | MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())); |
1763 | |
1764 | slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength())); |
1765 | |
1766 | if (!arrayMode.isOutOfBounds()) |
1767 | speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase); |
1768 | |
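// Grow the public length to index + 1, then restore propertyReg to the index for the store below.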
1769 | m_jit.add32(TrustedImm32(1), propertyReg); |
1770 | m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())); |
1771 | m_jit.sub32(TrustedImm32(1), propertyReg); |
1772 | |
1773 | inBounds.link(&m_jit); |
1774 | } |
1775 | |
1776 | m_jit.store32(valueTag, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); |
1777 | m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); |
1778 | |
1779 | base.use(); |
1780 | property.use(); |
1781 | value.use(); |
1782 | storage.use(); |
1783 | |
1784 | if (arrayMode.isOutOfBounds()) { |
1785 | if (node->op() == PutByValDirect) { |
1786 | addSlowPathGenerator(slowPathCall( |
1787 | slowCase, this, |
1788 | m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValDirectBeyondArrayBoundsNonStrict, |
1789 | NoResult, baseReg, propertyReg, JSValueRegs(valueTag, valuePayloadReg))); |
1790 | } else { |
1791 | addSlowPathGenerator(slowPathCall( |
1792 | slowCase, this, |
1793 | m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict, |
1794 | NoResult, baseReg, propertyReg, JSValueRegs(valueTag, valuePayloadReg))); |
1795 | } |
1796 | } |
1797 | |
1798 | noResult(node, UseChildrenCalledExplicitly); |
1799 | } |
1800 | |
1801 | void SpeculativeJIT::compile(Node* node) |
1802 | { |
1803 | NodeType op = node->op(); |
1804 | |
1805 | #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION) |
1806 | m_jit.clearRegisterAllocationOffsets(); |
1807 | #endif |
1808 | |
1809 | switch (op) { |
1810 | case JSConstant: |
1811 | case DoubleConstant: |
1812 | case PhantomDirectArguments: |
1813 | case PhantomClonedArguments: |
1814 | initConstantInfo(node); |
1815 | break; |
1816 | |
1817 | case LazyJSConstant: |
1818 | compileLazyJSConstant(node); |
1819 | break; |
1820 | |
1821 | case Identity: { |
1822 | compileIdentity(node); |
1823 | break; |
1824 | } |
1825 | |
1826 | case GetLocal: { |
1827 | AbstractValue& value = m_state.operand(node->local()); |
1828 | |
1829 | // If the CFA is tracking this variable and it found that the variable |
1830 | // cannot have been assigned, then don't attempt to proceed. |
1831 | if (value.isClear()) { |
1832 | m_compileOkay = false; |
1833 | break; |
1834 | } |
1835 | |
1836 | switch (node->variableAccessData()->flushFormat()) { |
1837 | case FlushedDouble: { |
1838 | FPRTemporary result(this); |
1839 | m_jit.loadDouble(JITCompiler::addressFor(node->machineLocal()), result.fpr()); |
1840 | VirtualRegister virtualRegister = node->virtualRegister(); |
1841 | m_fprs.retain(result.fpr(), virtualRegister, SpillOrderDouble); |
1842 | generationInfoFromVirtualRegister(virtualRegister).initDouble(node, node->refCount(), result.fpr()); |
1843 | break; |
1844 | } |
1845 | |
1846 | case FlushedInt32: { |
1847 | GPRTemporary result(this); |
1848 | m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr()); |
1849 | |
1850 | // Like int32Result, but don't useChildren - our children are phi nodes, |
1851 | // and don't represent values within this dataflow with virtual registers. |
1852 | VirtualRegister virtualRegister = node->virtualRegister(); |
1853 | m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger); |
1854 | generationInfoFromVirtualRegister(virtualRegister).initInt32(node, node->refCount(), result.gpr()); |
1855 | break; |
1856 | } |
1857 | |
1858 | case FlushedCell: { |
1859 | GPRTemporary result(this); |
1860 | m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr()); |
1861 | |
1862 | // Like cellResult, but don't useChildren - our children are phi nodes, |
1863 | // and don't represent values within this dataflow with virtual registers. |
1864 | VirtualRegister virtualRegister = node->virtualRegister(); |
1865 | m_gprs.retain(result.gpr(), virtualRegister, SpillOrderCell); |
1866 | generationInfoFromVirtualRegister(virtualRegister).initCell(node, node->refCount(), result.gpr()); |
1867 | break; |
1868 | } |
1869 | |
1870 | case FlushedBoolean: { |
1871 | GPRTemporary result(this); |
1872 | m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr()); |
1873 | |
1874 | // Like booleanResult, but don't useChildren - our children are phi nodes, |
1875 | // and don't represent values within this dataflow with virtual registers. |
1876 | VirtualRegister virtualRegister = node->virtualRegister(); |
1877 | m_gprs.retain(result.gpr(), virtualRegister, SpillOrderBoolean); |
1878 | generationInfoFromVirtualRegister(virtualRegister).initBoolean(node, node->refCount(), result.gpr()); |
1879 | break; |
1880 | } |
1881 | |
1882 | case FlushedJSValue: { |
1883 | GPRTemporary result(this); |
1884 | GPRTemporary tag(this); |
1885 | m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr()); |
1886 | m_jit.load32(JITCompiler::tagFor(node->machineLocal()), tag.gpr()); |
1887 | |
1888 | // Like jsValueResult, but don't useChildren - our children are phi nodes, |
1889 | // and don't represent values within this dataflow with virtual registers. |
1890 | VirtualRegister virtualRegister = node->virtualRegister(); |
1891 | m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS); |
1892 | m_gprs.retain(tag.gpr(), virtualRegister, SpillOrderJS); |
1893 | |
1894 | generationInfoFromVirtualRegister(virtualRegister).initJSValue(node, node->refCount(), tag.gpr(), result.gpr(), DataFormatJS); |
1895 | break; |
1896 | } |
1897 | |
1898 | default: |
1899 | RELEASE_ASSERT_NOT_REACHED(); |
1900 | } |
1901 | break; |
1902 | } |
1903 | |
1904 | case MovHint: { |
1905 | compileMovHint(m_currentNode); |
1906 | noResult(node); |
1907 | break; |
1908 | } |
1909 | |
1910 | case ZombieHint: { |
1911 | recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead); |
1912 | noResult(node); |
1913 | break; |
1914 | } |
1915 | |
1916 | case ExitOK: { |
1917 | noResult(node); |
1918 | break; |
1919 | } |
1920 | |
1921 | case SetLocal: { |
1922 | switch (node->variableAccessData()->flushFormat()) { |
1923 | case FlushedDouble: { |
1924 | SpeculateDoubleOperand value(this, node->child1()); |
1925 | m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node->machineLocal())); |
1926 | noResult(node); |
1927 | // Indicate that it's no longer necessary to retrieve the value of |
1928 | // this bytecode variable from registers or other locations in the stack, |
1929 | // but that it is stored as a double. |
1930 | recordSetLocal(DataFormatDouble); |
1931 | break; |
1932 | } |
1933 | |
1934 | case FlushedInt32: { |
1935 | SpeculateInt32Operand value(this, node->child1()); |
1936 | m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->machineLocal())); |
1937 | noResult(node); |
1938 | recordSetLocal(DataFormatInt32); |
1939 | break; |
1940 | } |
1941 | |
1942 | case FlushedCell: { |
1943 | SpeculateCellOperand cell(this, node->child1()); |
1944 | GPRReg cellGPR = cell.gpr(); |
1945 | m_jit.storePtr(cellGPR, JITCompiler::payloadFor(node->machineLocal())); |
1946 | noResult(node); |
1947 | recordSetLocal(DataFormatCell); |
1948 | break; |
1949 | } |
1950 | |
1951 | case FlushedBoolean: { |
1952 | SpeculateBooleanOperand value(this, node->child1()); |
1953 | m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->machineLocal())); |
1954 | noResult(node); |
1955 | recordSetLocal(DataFormatBoolean); |
1956 | break; |
1957 | } |
1958 | |
1959 | case FlushedJSValue: { |
1960 | JSValueOperand value(this, node->child1()); |
1961 | m_jit.store32(value.payloadGPR(), JITCompiler::payloadFor(node->machineLocal())); |
1962 | m_jit.store32(value.tagGPR(), JITCompiler::tagFor(node->machineLocal())); |
1963 | noResult(node); |
1964 | recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat())); |
1965 | break; |
1966 | } |
1967 | |
1968 | default: |
1969 | RELEASE_ASSERT_NOT_REACHED(); |
1970 | break; |
1971 | } |
1972 | break; |
1973 | } |
1974 | |
1975 | case SetArgumentDefinitely: |
1976 | case SetArgumentMaybe: |
1977 | // This is a no-op; it just marks the fact that the argument is being used. |
1978 | // But it may be profitable to use this as a hook to run speculation checks |
1979 | // on arguments, thereby allowing us to trivially eliminate such checks if |
1980 | // the argument is not used. |
1981 | recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat())); |
1982 | break; |
1983 | |
1984 | case ValueBitOr: |
1985 | case ValueBitAnd: |
1986 | case ValueBitXor: |
1987 | compileValueBitwiseOp(node); |
1988 | break; |
1989 | |
1990 | case ArithBitAnd: |
1991 | case ArithBitOr: |
1992 | case ArithBitXor: |
1993 | compileBitwiseOp(node); |
1994 | break; |
1995 | |
1996 | case ValueBitNot: |
1997 | compileValueBitNot(node); |
1998 | break; |
1999 | |
2000 | case ArithBitNot: |
2001 | compileBitwiseNot(node); |
2002 | break; |
2003 | |
2004 | case BitRShift: |
2005 | case BitLShift: |
2006 | case BitURShift: |
2007 | compileShiftOp(node); |
2008 | break; |
2009 | |
2010 | case UInt32ToNumber: { |
2011 | compileUInt32ToNumber(node); |
2012 | break; |
2013 | } |
2014 | |
2015 | case DoubleAsInt32: { |
2016 | compileDoubleAsInt32(node); |
2017 | break; |
2018 | } |
2019 | |
2020 | case ValueToInt32: { |
2021 | compileValueToInt32(node); |
2022 | break; |
2023 | } |
2024 | |
2025 | case DoubleRep: { |
2026 | compileDoubleRep(node); |
2027 | break; |
2028 | } |
2029 | |
2030 | case ValueRep: { |
2031 | compileValueRep(node); |
2032 | break; |
2033 | } |
2034 | |
2035 | case ValueNegate: |
2036 | compileValueNegate(node); |
2037 | break; |
2038 | |
2039 | case ValueAdd: |
2040 | compileValueAdd(node); |
2041 | break; |
2042 | |
2043 | case ValueSub: |
2044 | compileValueSub(node); |
2045 | break; |
2046 | |
2047 | case StrCat: { |
2048 | compileStrCat(node); |
2049 | break; |
2050 | } |
2051 | |
2052 | case ArithAdd: |
2053 | compileArithAdd(node); |
2054 | break; |
2055 | |
2056 | case ArithClz32: |
2057 | compileArithClz32(node); |
2058 | break; |
2059 | |
2060 | case MakeRope: |
2061 | compileMakeRope(node); |
2062 | break; |
2063 | |
2064 | case ArithSub: |
2065 | compileArithSub(node); |
2066 | break; |
2067 | |
2068 | case ArithNegate: |
2069 | compileArithNegate(node); |
2070 | break; |
2071 | |
2072 | case ArithMul: |
2073 | compileArithMul(node); |
2074 | break; |
2075 | |
2076 | case ValueMul: |
2077 | compileValueMul(node); |
2078 | break; |
2079 | |
2080 | case ValueDiv: { |
2081 | compileValueDiv(node); |
2082 | break; |
2083 | } |
2084 | |
2085 | case ArithDiv: { |
2086 | compileArithDiv(node); |
2087 | break; |
2088 | } |
2089 | |
2090 | case ValueMod: { |
2091 | compileValueMod(node); |
2092 | break; |
2093 | } |
2094 | |
2095 | case ArithMod: { |
2096 | compileArithMod(node); |
2097 | break; |
2098 | } |
2099 | |
2100 | case ValuePow: { |
2101 | compileValuePow(node); |
2102 | break; |
2103 | } |
2104 | |
2105 | case ArithPow: { |
2106 | compileArithPow(node); |
2107 | break; |
2108 | } |
2109 | |
2110 | case ArithAbs: |
2111 | compileArithAbs(node); |
2112 | break; |
2113 | |
2114 | case ArithMin: |
2115 | case ArithMax: { |
2116 | compileArithMinMax(node); |
2117 | break; |
2118 | } |
2119 | |
2120 | case ArithSqrt: |
2121 | compileArithSqrt(node); |
2122 | break; |
2123 | |
2124 | case ArithFRound: |
2125 | compileArithFRound(node); |
2126 | break; |
2127 | |
2128 | case ArithRandom: |
2129 | compileArithRandom(node); |
2130 | break; |
2131 | |
2132 | case ArithRound: |
2133 | case ArithFloor: |
2134 | case ArithCeil: |
2135 | case ArithTrunc: |
2136 | compileArithRounding(node); |
2137 | break; |
2138 | |
2139 | case ArithUnary: |
2140 | compileArithUnary(node); |
2141 | break; |
2142 | |
2143 | case LogicalNot: |
2144 | compileLogicalNot(node); |
2145 | break; |
2146 | |
2147 | case CompareLess: |
2148 | if (compare(node, JITCompiler::LessThan, JITCompiler::DoubleLessThan, operationCompareLess)) |
2149 | return; |
2150 | break; |
2151 | |
2152 | case CompareLessEq: |
2153 | if (compare(node, JITCompiler::LessThanOrEqual, JITCompiler::DoubleLessThanOrEqual, operationCompareLessEq)) |
2154 | return; |
2155 | break; |
2156 | |
2157 | case CompareGreater: |
2158 | if (compare(node, JITCompiler::GreaterThan, JITCompiler::DoubleGreaterThan, operationCompareGreater)) |
2159 | return; |
2160 | break; |
2161 | |
2162 | case CompareGreaterEq: |
2163 | if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq)) |
2164 | return; |
2165 | break; |
2166 | |
2167 | case CompareBelow: |
2168 | compileCompareUnsigned(node, JITCompiler::Below); |
2169 | break; |
2170 | |
2171 | case CompareBelowEq: |
2172 | compileCompareUnsigned(node, JITCompiler::BelowOrEqual); |
2173 | break; |
2174 | |
2175 | case CompareEq: |
2176 | if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq)) |
2177 | return; |
2178 | break; |
2179 | |
2180 | case CompareStrictEq: |
2181 | if (compileStrictEq(node)) |
2182 | return; |
2183 | break; |
2184 | |
2185 | case CompareEqPtr: |
2186 | compileCompareEqPtr(node); |
2187 | break; |
2188 | |
2189 | case SameValue: |
2190 | compileSameValue(node); |
2191 | break; |
2192 | |
2193 | case StringCharCodeAt: { |
2194 | compileGetCharCodeAt(node); |
2195 | break; |
2196 | } |
2197 | |
2198 | case StringCharAt: { |
2199 | // Relies on the StringCharAt node having the same basic layout as GetByVal. |
2200 | compileGetByValOnString(node); |
2201 | break; |
2202 | } |
2203 | |
2204 | case StringFromCharCode: { |
2205 | compileFromCharCode(node); |
2206 | break; |
2207 | } |
2208 | |
2209 | case CheckArray: { |
2210 | checkArray(node); |
2211 | break; |
2212 | } |
2213 | |
2214 | case Arrayify: |
2215 | case ArrayifyToStructure: { |
2216 | arrayify(node); |
2217 | break; |
2218 | } |
2219 | |
2220 | case GetByVal: { |
2221 | switch (node->arrayMode().type()) { |
2222 | case Array::SelectUsingPredictions: |
2223 | case Array::ForceExit: |
2224 | RELEASE_ASSERT_NOT_REACHED(); |
2225 | #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE) |
2226 | terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); |
2227 | #endif |
2228 | break; |
2229 | case Array::Undecided: { |
2230 | SpeculateStrictInt32Operand index(this, m_graph.varArgChild(node, 1)); |
2231 | GPRTemporary resultTag(this, Reuse, index); |
2232 | GPRTemporary resultPayload(this); |
2233 | |
2234 | GPRReg indexGPR = index.gpr(); |
2235 | GPRReg resultTagGPR = resultTag.gpr(); |
2236 | GPRReg resultPayloadGPR = resultPayload.gpr(); |
2237 | |
2238 | speculationCheck(OutOfBounds, JSValueRegs(), node, |
2239 | m_jit.branch32(MacroAssembler::LessThan, indexGPR, MacroAssembler::TrustedImm32(0))); |
2240 | |
2241 | use(m_graph.varArgChild(node, 0)); |
2242 | index.use(); |
2243 | |
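// An Undecided array has no indexed storage to read, so a non-negative index simply yields undefined.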
2244 | m_jit.move(MacroAssembler::TrustedImm32(JSValue::UndefinedTag), resultTagGPR); |
2245 | m_jit.move(MacroAssembler::TrustedImm32(0), resultPayloadGPR); |
2246 | jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly); |
2247 | break; |
2248 | } |
2249 | case Array::Generic: { |
2250 | if (m_graph.varArgChild(node, 0).useKind() == ObjectUse) { |
2251 | if (m_graph.varArgChild(node, 1).useKind() == StringUse) { |
2252 | compileGetByValForObjectWithString(node); |
2253 | break; |
2254 | } |
2255 | |
2256 | if (m_graph.varArgChild(node, 1).useKind() == SymbolUse) { |
2257 | compileGetByValForObjectWithSymbol(node); |
2258 | break; |
2259 | } |
2260 | } |
2261 | |
2262 | SpeculateCellOperand base(this, m_graph.varArgChild(node, 0)); // Save a register, speculate cell. We'll probably be right. |
2263 | JSValueOperand property(this, m_graph.varArgChild(node, 1)); |
2264 | GPRReg baseGPR = base.gpr(); |
2265 | JSValueRegs propertyRegs = property.jsValueRegs(); |
2266 | |
2267 | flushRegisters(); |
2268 | JSValueRegsFlushedCallResult result(this); |
2269 | JSValueRegs resultRegs = result.regs(); |
2270 | callOperation(operationGetByValCell, resultRegs, baseGPR, propertyRegs); |
2271 | m_jit.exceptionCheck(); |
2272 | |
2273 | jsValueResult(resultRegs, node); |
2274 | break; |
2275 | } |
2276 | case Array::Int32: |
2277 | case Array::Contiguous: { |
2278 | if (node->arrayMode().isInBounds()) { |
2279 | SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1)); |
2280 | StorageOperand storage(this, m_graph.varArgChild(node, 2)); |
2281 | |
2282 | GPRReg propertyReg = property.gpr(); |
2283 | GPRReg storageReg = storage.gpr(); |
2284 | |
2285 | if (!m_compileOkay) |
2286 | return; |
2287 | |
2288 | speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); |
2289 | |
2290 | GPRTemporary resultPayload(this); |
2291 | if (node->arrayMode().type() == Array::Int32) { |
2292 | ASSERT(!node->arrayMode().isSaneChain()); |
2293 | |
2294 | speculationCheck( |
2295 | OutOfBounds, JSValueRegs(), 0, |
2296 | m_jit.branch32( |
2297 | MacroAssembler::Equal, |
2298 | MacroAssembler::BaseIndex( |
2299 | storageReg, propertyReg, MacroAssembler::TimesEight, TagOffset), |
2300 | TrustedImm32(JSValue::EmptyValueTag))); |
2301 | m_jit.load32( |
2302 | MacroAssembler::BaseIndex( |
2303 | storageReg, propertyReg, MacroAssembler::TimesEight, PayloadOffset), |
2304 | resultPayload.gpr()); |
2305 | int32Result(resultPayload.gpr(), node); |
2306 | break; |
2307 | } |
2308 | |
2309 | GPRTemporary resultTag(this); |
2310 | m_jit.load32( |
2311 | MacroAssembler::BaseIndex( |
2312 | storageReg, propertyReg, MacroAssembler::TimesEight, TagOffset), |
2313 | resultTag.gpr()); |
2314 | m_jit.load32( |
2315 | MacroAssembler::BaseIndex( |
2316 | storageReg, propertyReg, MacroAssembler::TimesEight, PayloadOffset), |
2317 | resultPayload.gpr()); |
2318 | if (node->arrayMode().isSaneChain()) { |
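// In SaneChain mode a hole read is converted to undefined here instead of taking a speculation check.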
2319 | JITCompiler::Jump notHole = m_jit.branchIfNotEmpty(resultTag.gpr()); |
2320 | m_jit.move(TrustedImm32(JSValue::UndefinedTag), resultTag.gpr()); |
2321 | m_jit.move(TrustedImm32(0), resultPayload.gpr()); |
2322 | notHole.link(&m_jit); |
2323 | } else { |
2324 | speculationCheck( |
2325 | LoadFromHole, JSValueRegs(), 0, |
2326 | m_jit.branchIfEmpty(resultTag.gpr())); |
2327 | } |
2328 | jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); |
2329 | break; |
2330 | } |
2331 | |
2332 | SpeculateCellOperand base(this, m_graph.varArgChild(node, 0)); |
2333 | SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1)); |
2334 | StorageOperand storage(this, m_graph.varArgChild(node, 2)); |
2335 | |
2336 | GPRReg baseReg = base.gpr(); |
2337 | GPRReg propertyReg = property.gpr(); |
2338 | GPRReg storageReg = storage.gpr(); |
2339 | |
2340 | if (!m_compileOkay) |
2341 | return; |
2342 | |
2343 | GPRTemporary resultTag(this); |
2344 | GPRTemporary resultPayload(this); |
2345 | GPRReg resultTagReg = resultTag.gpr(); |
2346 | GPRReg resultPayloadReg = resultPayload.gpr(); |
2347 | |
2348 | MacroAssembler::JumpList slowCases; |
2349 | |
2350 | slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); |
2351 | |
2352 | m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagReg); |
2353 | m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadReg); |
2354 | slowCases.append(m_jit.branchIfEmpty(resultTagReg)); |
2355 | |
2356 | addSlowPathGenerator( |
2357 | slowPathCall( |
2358 | slowCases, this, operationGetByValObjectInt, |
2359 | JSValueRegs(resultTagReg, resultPayloadReg), baseReg, propertyReg)); |
2360 | |
2361 | jsValueResult(resultTagReg, resultPayloadReg, node); |
2362 | break; |
2363 | } |
2364 | case Array::Double: { |
2365 | if (node->arrayMode().isInBounds()) { |
2366 | SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1)); |
2367 | StorageOperand storage(this, m_graph.varArgChild(node, 2)); |
2368 | |
2369 | GPRReg propertyReg = property.gpr(); |
2370 | GPRReg storageReg = storage.gpr(); |
2371 | |
2372 | if (!m_compileOkay) |
2373 | return; |
2374 | |
2375 | speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); |
2376 | |
2377 | FPRTemporary result(this); |
2378 | m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.fpr()); |
2379 | if (!node->arrayMode().isSaneChain()) |
2380 | speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchIfNaN(result.fpr())); |
2381 | doubleResult(result.fpr(), node); |
2382 | break; |
2383 | } |
2384 | |
2385 | SpeculateCellOperand base(this, m_graph.varArgChild(node, 0)); |
2386 | SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1)); |
2387 | StorageOperand storage(this, m_graph.varArgChild(node, 2)); |
2388 | |
2389 | GPRReg baseReg = base.gpr(); |
2390 | GPRReg propertyReg = property.gpr(); |
2391 | GPRReg storageReg = storage.gpr(); |
2392 | |
2393 | if (!m_compileOkay) |
2394 | return; |
2395 | |
2396 | GPRTemporary resultTag(this); |
2397 | GPRTemporary resultPayload(this); |
2398 | FPRTemporary temp(this); |
2399 | GPRReg resultTagReg = resultTag.gpr(); |
2400 | GPRReg resultPayloadReg = resultPayload.gpr(); |
2401 | FPRReg tempReg = temp.fpr(); |
2402 | |
2403 | MacroAssembler::JumpList slowCases; |
2404 | |
2405 | slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); |
2406 | |
2407 | m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), tempReg); |
2408 | slowCases.append(m_jit.branchIfNaN(tempReg)); |
2409 | boxDouble(tempReg, resultTagReg, resultPayloadReg); |
2410 | |
2411 | addSlowPathGenerator( |
2412 | slowPathCall( |
2413 | slowCases, this, operationGetByValObjectInt, |
2414 | JSValueRegs(resultTagReg, resultPayloadReg), baseReg, propertyReg)); |
2415 | |
2416 | jsValueResult(resultTagReg, resultPayloadReg, node); |
2417 | break; |
2418 | } |
2419 | case Array::ArrayStorage: |
2420 | case Array::SlowPutArrayStorage: { |
2421 | if (node->arrayMode().isInBounds()) { |
2422 | SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1)); |
2423 | StorageOperand storage(this, m_graph.varArgChild(node, 2)); |
2424 | GPRReg propertyReg = property.gpr(); |
2425 | GPRReg storageReg = storage.gpr(); |
2426 | |
2427 | if (!m_compileOkay) |
2428 | return; |
2429 | |
2430 | speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()))); |
2431 | |
2432 | GPRTemporary resultTag(this); |
2433 | GPRTemporary resultPayload(this); |
2434 | |
2435 | m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr()); |
2436 | speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchIfEmpty(resultTag.gpr())); |
2437 | m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr()); |
2438 | |
2439 | jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); |
2440 | break; |
2441 | } |
2442 | |
2443 | SpeculateCellOperand base(this, m_graph.varArgChild(node, 0)); |
2444 | SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1)); |
2445 | StorageOperand storage(this, m_graph.varArgChild(node, 2)); |
2446 | GPRReg propertyReg = property.gpr(); |
2447 | GPRReg storageReg = storage.gpr(); |
2448 | GPRReg baseReg = base.gpr(); |
2449 | |
2450 | if (!m_compileOkay) |
2451 | return; |
2452 | |
2453 | GPRTemporary resultTag(this); |
2454 | GPRTemporary resultPayload(this); |
2455 | GPRReg resultTagReg = resultTag.gpr(); |
2456 | GPRReg resultPayloadReg = resultPayload.gpr(); |
2457 | |
2458 | JITCompiler::Jump outOfBounds = m_jit.branch32( |
2459 | MacroAssembler::AboveOrEqual, propertyReg, |
2460 | MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())); |
2461 | |
2462 | m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagReg); |
2463 | JITCompiler::Jump hole = m_jit.branchIfEmpty(resultTagReg); |
2464 | m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadReg); |
2465 | |
2466 | JITCompiler::JumpList slowCases; |
2467 | slowCases.append(outOfBounds); |
2468 | slowCases.append(hole); |
2469 | addSlowPathGenerator( |
2470 | slowPathCall( |
2471 | slowCases, this, operationGetByValObjectInt, |
2472 | JSValueRegs(resultTagReg, resultPayloadReg), |
2473 | baseReg, propertyReg)); |
2474 | |
2475 | jsValueResult(resultTagReg, resultPayloadReg, node); |
2476 | break; |
2477 | } |
2478 | case Array::String: |
2479 | compileGetByValOnString(node); |
2480 | break; |
2481 | case Array::DirectArguments: |
2482 | compileGetByValOnDirectArguments(node); |
2483 | break; |
2484 | case Array::ScopedArguments: |
2485 | compileGetByValOnScopedArguments(node); |
2486 | break; |
2487 | default: { |
2488 | TypedArrayType type = node->arrayMode().typedArrayType(); |
2489 | if (isInt(type)) |
2490 | compileGetByValOnIntTypedArray(node, type); |
2491 | else |
2492 | compileGetByValOnFloatTypedArray(node, type); |
2493 | } } |
2494 | break; |
2495 | } |
2496 | |
2497 | case StringSlice: { |
2498 | compileStringSlice(node); |
2499 | break; |
2500 | } |
2501 | |
2502 | case ToLowerCase: { |
2503 | compileToLowerCase(node); |
2504 | break; |
2505 | } |
2506 | |
2507 | case NumberToStringWithRadix: { |
2508 | compileNumberToStringWithRadix(node); |
2509 | break; |
2510 | } |
2511 | |
2512 | case NumberToStringWithValidRadixConstant: { |
2513 | compileNumberToStringWithValidRadixConstant(node); |
2514 | break; |
2515 | } |
2516 | |
2517 | case GetByValWithThis: { |
2518 | compileGetByValWithThis(node); |
2519 | break; |
2520 | } |
2521 | |
2522 | case PutByValDirect: |
2523 | case PutByVal: |
2524 | case PutByValAlias: { |
2525 | Edge child1 = m_jit.graph().varArgChild(node, 0); |
2526 | Edge child2 = m_jit.graph().varArgChild(node, 1); |
2527 | Edge child3 = m_jit.graph().varArgChild(node, 2); |
2528 | Edge child4 = m_jit.graph().varArgChild(node, 3); |
2529 | |
2530 | ArrayMode arrayMode = node->arrayMode().modeForPut(); |
2531 | bool alreadyHandled = false; |
2532 | |
2533 | switch (arrayMode.type()) { |
2534 | case Array::SelectUsingPredictions: |
2535 | case Array::ForceExit: |
2536 | RELEASE_ASSERT_NOT_REACHED(); |
2537 | #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE) |
2538 | terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); |
2539 | alreadyHandled = true; |
2540 | #endif |
2541 | break; |
2542 | case Array::Generic: { |
2543 | ASSERT(node->op() == PutByVal || node->op() == PutByValDirect); |
2544 | |
2545 | if (child1.useKind() == CellUse) { |
2546 | if (child2.useKind() == StringUse) { |
2547 | compilePutByValForCellWithString(node, child1, child2, child3); |
2548 | alreadyHandled = true; |
2549 | break; |
2550 | } |
2551 | |
2552 | if (child2.useKind() == SymbolUse) { |
2553 | compilePutByValForCellWithSymbol(node, child1, child2, child3); |
2554 | alreadyHandled = true; |
2555 | break; |
2556 | } |
2557 | } |
2558 | |
2559 | SpeculateCellOperand base(this, child1); // Save a register, speculate cell. We'll probably be right. |
2560 | JSValueOperand property(this, child2); |
2561 | JSValueOperand value(this, child3); |
2562 | GPRReg baseGPR = base.gpr(); |
2563 | JSValueRegs propertyRegs = property.jsValueRegs(); |
2564 | JSValueRegs valueRegs = value.jsValueRegs(); |
2565 | |
2566 | flushRegisters(); |
2567 | if (node->op() == PutByValDirect) |
2568 | callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict, baseGPR, propertyRegs, valueRegs); |
2569 | else |
2570 | callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValCellStrict : operationPutByValCellNonStrict, baseGPR, propertyRegs, valueRegs); |
2571 | m_jit.exceptionCheck(); |
2572 | |
2573 | noResult(node); |
2574 | alreadyHandled = true; |
2575 | break; |
2576 | } |
2577 | default: |
2578 | break; |
2579 | } |
2580 | |
2581 | if (alreadyHandled) |
2582 | break; |
2583 | |
2584 | SpeculateCellOperand base(this, child1); |
2585 | SpeculateStrictInt32Operand property(this, child2); |
2586 | |
2587 | GPRReg baseReg = base.gpr(); |
2588 | GPRReg propertyReg = property.gpr(); |
2589 | |
2590 | switch (arrayMode.type()) { |
2591 | case Array::Int32: { |
2592 | speculateInt32(child3); |
2593 | FALLTHROUGH; |
2594 | } |
2595 | case Array::Contiguous: { |
2596 | JSValueOperand value(this, child3, ManualOperandSpeculation); |
2597 | |
2598 | GPRReg valueTagReg = value.tagGPR(); |
2599 | GPRReg valuePayloadReg = value.payloadGPR(); |
2600 | |
2601 | if (!m_compileOkay) |
2602 | return; |
2603 | |
2604 | compileContiguousPutByVal(node, base, property, value, valuePayloadReg, valueTagReg); |
2605 | break; |
2606 | } |
2607 | case Array::Double: { |
2608 | compileDoublePutByVal(node, base, property); |
2609 | break; |
2610 | } |
2611 | case Array::ArrayStorage: |
2612 | case Array::SlowPutArrayStorage: { |
2613 | JSValueOperand value(this, child3); |
2614 | |
2615 | GPRReg valueTagReg = value.tagGPR(); |
2616 | GPRReg valuePayloadReg = value.payloadGPR(); |
2617 | |
2618 | if (!m_compileOkay) |
2619 | return; |
2620 | |
2621 | StorageOperand storage(this, child4); |
2622 | GPRReg storageReg = storage.gpr(); |
2623 | |
2624 | if (node->op() == PutByValAlias) { |
2625 | // Store the value to the array. |
2626 | GPRReg propertyReg = property.gpr(); |
2627 | m_jit.store32(value.tagGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); |
2628 | m_jit.store32(value.payloadGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); |
2629 | |
2630 | noResult(node); |
2631 | break; |
2632 | } |
2633 | |
2634 | MacroAssembler::JumpList slowCases; |
2635 | |
2636 | MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())); |
2637 | if (!arrayMode.isOutOfBounds()) |
2638 | speculationCheck(OutOfBounds, JSValueRegs(), 0, beyondArrayBounds); |
2639 | else |
2640 | slowCases.append(beyondArrayBounds); |
2641 | |
2642 | // Check if we're writing to a hole; if so increment m_numValuesInVector. |
2643 | if (arrayMode.isInBounds()) { |
2644 | speculationCheck( |
2645 | StoreToHole, JSValueRegs(), 0, |
2646 | m_jit.branch32(MacroAssembler::Equal, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag))); |
2647 | } else { |
2648 | MacroAssembler::Jump notHoleValue = m_jit.branch32(MacroAssembler::NotEqual, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag)); |
2649 | if (arrayMode.isSlowPut()) { |
2650 | // This is sort of strange. If we wanted to optimize this code path, we would invert |
2651 | // the above branch. But it's simply not worth it since this only happens if we're |
2652 | // already having a bad time. |
2653 | slowCases.append(m_jit.jump()); |
2654 | } else { |
2655 | m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset())); |
2656 | |
2657 | // If we're writing to a hole we might be growing the array, so update the length if needed. |
2658 | MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); |
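// Bump the stored length to index + 1, then restore propertyReg to the index.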
2659 | m_jit.add32(TrustedImm32(1), propertyReg); |
2660 | m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); |
2661 | m_jit.sub32(TrustedImm32(1), propertyReg); |
2662 | |
2663 | lengthDoesNotNeedUpdate.link(&m_jit); |
2664 | } |
2665 | notHoleValue.link(&m_jit); |
2666 | } |
2667 | |
2668 | // Store the value to the array. |
2669 | m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); |
2670 | m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); |
2671 | |
2672 | base.use(); |
2673 | property.use(); |
2674 | value.use(); |
2675 | storage.use(); |
2676 | |
2677 | if (!slowCases.empty()) { |
2678 | if (node->op() == PutByValDirect) { |
2679 | addSlowPathGenerator(slowPathCall( |
2680 | slowCases, this, |
2681 | m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValDirectBeyondArrayBoundsNonStrict, |
2682 | NoResult, baseReg, propertyReg, JSValueRegs(valueTagReg, valuePayloadReg))); |
2683 | } else { |
2684 | addSlowPathGenerator(slowPathCall( |
2685 | slowCases, this, |
2686 | m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict, |
2687 | NoResult, baseReg, propertyReg, JSValueRegs(valueTagReg, valuePayloadReg))); |
2688 | } |
2689 | } |
2690 | |
2691 | noResult(node, UseChildrenCalledExplicitly); |
2692 | break; |
2693 | } |
2694 | |
2695 | default: { |
2696 | TypedArrayType type = arrayMode.typedArrayType(); |
2697 | if (isInt(type)) |
2698 | compilePutByValForIntTypedArray(base.gpr(), property.gpr(), node, type); |
2699 | else |
2700 | compilePutByValForFloatTypedArray(base.gpr(), property.gpr(), node, type); |
2701 | } } |
2702 | break; |
2703 | } |
2704 | |
2705 | case PutByValWithThis: { |
2706 | #if CPU(X86) |
2707 | // We don't have enough registers on X86 to do this |
2708 | // without setting up the call frame incrementally. |
2709 | unsigned index = 0; |
2710 | m_jit.poke(GPRInfo::callFrameRegister, index++); |
2711 | |
2712 | { |
2713 | JSValueOperand base(this, m_jit.graph().varArgChild(node, 0)); |
2714 | GPRReg baseTag = base.tagGPR(); |
2715 | GPRReg basePayload = base.payloadGPR(); |
2716 | |
2717 | JSValueOperand thisValue(this, m_jit.graph().varArgChild(node, 1)); |
2718 | GPRReg thisValueTag = thisValue.tagGPR(); |
2719 | GPRReg thisValuePayload = thisValue.payloadGPR(); |
2720 | |
2721 | JSValueOperand property(this, m_jit.graph().varArgChild(node, 2)); |
2722 | GPRReg propertyTag = property.tagGPR(); |
2723 | GPRReg propertyPayload = property.payloadGPR(); |
2724 | |
2725 | m_jit.poke(basePayload, index++); |
2726 | m_jit.poke(baseTag, index++); |
2727 | |
2728 | m_jit.poke(thisValuePayload, index++); |
2729 | m_jit.poke(thisValueTag, index++); |
2730 | |
2731 | m_jit.poke(propertyPayload, index++); |
2732 | m_jit.poke(propertyTag, index++); |
2733 | |
2734 | flushRegisters(); |
2735 | } |
2736 | |
2737 | JSValueOperand value(this, m_jit.graph().varArgChild(node, 3)); |
2738 | GPRReg valueTag = value.tagGPR(); |
2739 | GPRReg valuePayload = value.payloadGPR(); |
2740 | m_jit.poke(valuePayload, index++); |
2741 | m_jit.poke(valueTag, index++); |
2742 | |
2743 | flushRegisters(); |
2744 | appendCall(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis); |
2745 | m_jit.exceptionCheck(); |
2746 | #else |
2747 | static_assert(GPRInfo::numberOfRegisters >= 8, "We are assuming we have enough registers to make this call without incrementally setting up the arguments." ); |
2748 | |
2749 | JSValueOperand base(this, m_jit.graph().varArgChild(node, 0)); |
2750 | JSValueRegs baseRegs = base.jsValueRegs(); |
2751 | |
2752 | JSValueOperand thisValue(this, m_jit.graph().varArgChild(node, 1)); |
2753 | JSValueRegs thisRegs = thisValue.jsValueRegs(); |
2754 | |
2755 | JSValueOperand property(this, m_jit.graph().varArgChild(node, 2)); |
2756 | JSValueRegs propertyRegs = property.jsValueRegs(); |
2757 | |
2758 | JSValueOperand value(this, m_jit.graph().varArgChild(node, 3)); |
2759 | JSValueRegs valueRegs = value.jsValueRegs(); |
2760 | |
2761 | flushRegisters(); |
2762 | callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis, |
2763 | NoResult, baseRegs, thisRegs, propertyRegs, valueRegs); |
2764 | m_jit.exceptionCheck(); |
2765 | #endif // CPU(X86) |
2766 | |
2767 | noResult(node); |
2768 | break; |
2769 | } |
2770 | |
2771 | case RegExpExec: { |
2772 | compileRegExpExec(node); |
2773 | break; |
2774 | } |
2775 | |
2776 | case RegExpExecNonGlobalOrSticky: { |
2777 | compileRegExpExecNonGlobalOrSticky(node); |
2778 | break; |
2779 | } |
2780 | |
2781 | case RegExpMatchFastGlobal: { |
2782 | compileRegExpMatchFastGlobal(node); |
2783 | break; |
2784 | } |
2785 | |
2786 | case RegExpTest: { |
2787 | compileRegExpTest(node); |
2788 | break; |
2789 | } |
2790 | |
2791 | case RegExpMatchFast: { |
2792 | compileRegExpMatchFast(node); |
2793 | break; |
2794 | } |
2795 | |
2796 | case StringReplace: |
2797 | case StringReplaceRegExp: { |
2798 | compileStringReplace(node); |
2799 | break; |
2800 | } |
2801 | |
2802 | case GetRegExpObjectLastIndex: { |
2803 | compileGetRegExpObjectLastIndex(node); |
2804 | break; |
2805 | } |
2806 | |
2807 | case SetRegExpObjectLastIndex: { |
2808 | compileSetRegExpObjectLastIndex(node); |
2809 | break; |
2810 | } |
2811 | |
2812 | case RecordRegExpCachedResult: { |
2813 | compileRecordRegExpCachedResult(node); |
2814 | break; |
2815 | } |
2816 | |
2817 | case ArrayPush: { |
2818 | compileArrayPush(node); |
2819 | break; |
2820 | } |
2821 | |
2822 | case ArrayPop: { |
2823 | ASSERT(node->arrayMode().isJSArray()); |
2824 | |
2825 | SpeculateCellOperand base(this, node->child1()); |
2826 | StorageOperand storage(this, node->child2()); |
2827 | GPRTemporary valueTag(this); |
2828 | GPRTemporary valuePayload(this); |
2829 | |
2830 | GPRReg baseGPR = base.gpr(); |
2831 | GPRReg valueTagGPR = valueTag.gpr(); |
2832 | GPRReg valuePayloadGPR = valuePayload.gpr(); |
2833 | GPRReg storageGPR = storage.gpr(); |
2834 | |
2835 | switch (node->arrayMode().type()) { |
2836 | case Array::Int32: |
2837 | case Array::Contiguous: { |
2838 | m_jit.load32( |
2839 | MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), valuePayloadGPR); |
2840 | MacroAssembler::Jump undefinedCase = |
2841 | m_jit.branchTest32(MacroAssembler::Zero, valuePayloadGPR); |
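// Decrement the public length; valuePayloadGPR then holds the index of the element being popped.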
2842 | m_jit.sub32(TrustedImm32(1), valuePayloadGPR); |
2843 | m_jit.store32( |
2844 | valuePayloadGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); |
2845 | m_jit.load32( |
2846 | MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), |
2847 | valueTagGPR); |
2848 | MacroAssembler::Jump slowCase = m_jit.branchIfEmpty(valueTagGPR); |
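// Mark the popped slot as a hole by storing the empty value tag.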
2849 | m_jit.store32( |
2850 | MacroAssembler::TrustedImm32(JSValue::EmptyValueTag), |
2851 | MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); |
2852 | m_jit.load32( |
2853 | MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), |
2854 | valuePayloadGPR); |
2855 | |
2856 | addSlowPathGenerator( |
2857 | slowPathMove( |
2858 | undefinedCase, this, |
2859 | MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR, |
2860 | MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR)); |
2861 | addSlowPathGenerator( |
2862 | slowPathCall( |
2863 | slowCase, this, operationArrayPopAndRecoverLength, |
2864 | JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR)); |
2865 | |
2866 | jsValueResult(valueTagGPR, valuePayloadGPR, node); |
2867 | break; |
2868 | } |
2869 | |
2870 | case Array::Double: { |
2871 | FPRTemporary temp(this); |
2872 | FPRReg tempFPR = temp.fpr(); |
2873 | |
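// Same shape as the contiguous case, but a hole in a double array is represented by NaN:
// loading NaN means we hit a hole and must take the slow path. On the fast path we write
// the hole pattern back into the vacated slot and box the popped double.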
2874 | m_jit.load32( |
2875 | MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), valuePayloadGPR); |
2876 | MacroAssembler::Jump undefinedCase = |
2877 | m_jit.branchTest32(MacroAssembler::Zero, valuePayloadGPR); |
2878 | m_jit.sub32(TrustedImm32(1), valuePayloadGPR); |
2879 | m_jit.store32( |
2880 | valuePayloadGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); |
2881 | m_jit.loadDouble( |
2882 | MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight), |
2883 | tempFPR); |
2884 | MacroAssembler::Jump slowCase = m_jit.branchIfNaN(tempFPR); |
2885 | JSValue nan = JSValue(JSValue::EncodeAsDouble, PNaN); |
2886 | m_jit.store32( |
2887 | MacroAssembler::TrustedImm32(nan.u.asBits.tag), |
2888 | MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); |
2889 | m_jit.store32( |
2890 | MacroAssembler::TrustedImm32(nan.u.asBits.payload), |
2891 | MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); |
2892 | boxDouble(tempFPR, valueTagGPR, valuePayloadGPR); |
2893 | |
2894 | addSlowPathGenerator( |
2895 | slowPathMove( |
2896 | undefinedCase, this, |
2897 | MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR, |
2898 | MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR)); |
2899 | addSlowPathGenerator( |
2900 | slowPathCall( |
2901 | slowCase, this, operationArrayPopAndRecoverLength, |
2902 | JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR)); |
2903 | |
2904 | jsValueResult(valueTagGPR, valuePayloadGPR, node); |
2905 | break; |
2906 | } |
2907 | |
2908 | case Array::ArrayStorage: { |
2909 | GPRTemporary storageLength(this); |
2910 | GPRReg storageLengthGPR = storageLength.gpr(); |
2911 | |
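// ArrayStorage pop: a zero length yields undefined. After decrementing, an index at or beyond
// vectorLength means the element (if any) is not in the vector, so defer to operationArrayPop.
// Otherwise load the slot and store the new length; a hole still yields undefined, while a real
// value has its slot cleared and m_numValuesInVector decremented.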
2912 | m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR); |
2913 | |
2914 | JITCompiler::JumpList setUndefinedCases; |
2915 | setUndefinedCases.append(m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR)); |
2916 | |
2917 | m_jit.sub32(TrustedImm32(1), storageLengthGPR); |
2918 | |
2919 | MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())); |
2920 | |
2921 | m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), valueTagGPR); |
2922 | m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), valuePayloadGPR); |
2923 | |
2924 | m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset())); |
2925 | |
2926 | setUndefinedCases.append(m_jit.branchIfEmpty(valueTagGPR)); |
2927 | |
2928 | m_jit.store32(TrustedImm32(JSValue::EmptyValueTag), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); |
2929 | |
2930 | m_jit.sub32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); |
2931 | |
2932 | addSlowPathGenerator( |
2933 | slowPathMove( |
2934 | setUndefinedCases, this, |
2935 | MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR, |
2936 | MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR)); |
2937 | |
2938 | addSlowPathGenerator( |
2939 | slowPathCall( |
2940 | slowCase, this, operationArrayPop, |
2941 | JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR)); |
2942 | |
2943 | jsValueResult(valueTagGPR, valuePayloadGPR, node); |
2944 | break; |
2945 | } |
2946 | |
2947 | default: |
2948 | CRASH(); |
2949 | break; |
2950 | } |
2951 | break; |
2952 | } |
2953 | |
2954 | case ArraySlice: { |
2955 | compileArraySlice(node); |
2956 | break; |
2957 | } |
2958 | |
2959 | case ArrayIndexOf: { |
2960 | compileArrayIndexOf(node); |
2961 | break; |
2962 | } |
2963 | |
2964 | case DFG::Jump: { |
2965 | jump(node->targetBlock()); |
2966 | noResult(node); |
2967 | break; |
2968 | } |
2969 | |
2970 | case Branch: |
2971 | emitBranch(node); |
2972 | break; |
2973 | |
2974 | case Switch: |
2975 | emitSwitch(node); |
2976 | break; |
2977 | |
2978 | case Return: { |
2979 | ASSERT(GPRInfo::callFrameRegister != GPRInfo::regT2); |
2980 | ASSERT(GPRInfo::regT1 != GPRInfo::returnValueGPR); |
2981 | ASSERT(GPRInfo::returnValueGPR != GPRInfo::callFrameRegister); |
2982 | |
2983 | // Return the result in returnValueGPR. |
2984 | JSValueOperand op1(this, node->child1()); |
2985 | op1.fill(); |
2986 | if (op1.isDouble()) |
2987 | boxDouble(op1.fpr(), GPRInfo::returnValueGPR2, GPRInfo::returnValueGPR); |
2988 | else { |
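// Move the tag/payload pair into returnValueGPR2/returnValueGPR without clobbering a
// source before it is read: swap when the pair is exactly crossed, otherwise order the
// moves so the first destination is not the second move's source.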
2989 | if (op1.payloadGPR() == GPRInfo::returnValueGPR2 && op1.tagGPR() == GPRInfo::returnValueGPR) |
2990 | m_jit.swap(GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2); |
2991 | else if (op1.payloadGPR() == GPRInfo::returnValueGPR2) { |
2992 | m_jit.move(op1.payloadGPR(), GPRInfo::returnValueGPR); |
2993 | m_jit.move(op1.tagGPR(), GPRInfo::returnValueGPR2); |
2994 | } else { |
2995 | m_jit.move(op1.tagGPR(), GPRInfo::returnValueGPR2); |
2996 | m_jit.move(op1.payloadGPR(), GPRInfo::returnValueGPR); |
2997 | } |
2998 | } |
2999 | |
3000 | m_jit.emitRestoreCalleeSaves(); |
3001 | m_jit.emitFunctionEpilogue(); |
3002 | m_jit.ret(); |
3003 | |
3004 | noResult(node); |
3005 | break; |
3006 | } |
3007 | |
3008 | case Throw: { |
3009 | compileThrow(node); |
3010 | break; |
3011 | } |
3012 | |
3013 | case ThrowStaticError: { |
3014 | compileThrowStaticError(node); |
3015 | break; |
3016 | } |
3017 | |
3018 | case BooleanToNumber: { |
3019 | switch (node->child1().useKind()) { |
3020 | case BooleanUse: { |
3021 | SpeculateBooleanOperand value(this, node->child1()); |
GPRTemporary result(this); // FIXME: We could reuse the operand, but on speculation failure we would need recovery code to restore the tag (as with add).
3023 | |
3024 | m_jit.move(value.gpr(), result.gpr()); |
3025 | |
3026 | int32Result(result.gpr(), node); |
3027 | break; |
3028 | } |
3029 | |
3030 | case UntypedUse: { |
3031 | JSValueOperand value(this, node->child1()); |
3032 | |
3033 | if (!m_interpreter.needsTypeCheck(node->child1(), SpecBoolInt32 | SpecBoolean)) { |
3034 | GPRTemporary result(this); |
3035 | |
3036 | GPRReg valueGPR = value.payloadGPR(); |
3037 | GPRReg resultGPR = result.gpr(); |
3038 | |
3039 | m_jit.move(valueGPR, resultGPR); |
3040 | int32Result(result.gpr(), node); |
3041 | break; |
3042 | } |
3043 | |
3044 | GPRTemporary resultTag(this); |
3045 | GPRTemporary resultPayload(this); |
3046 | |
3047 | GPRReg valueTagGPR = value.tagGPR(); |
3048 | GPRReg valuePayloadGPR = value.payloadGPR(); |
3049 | GPRReg resultTagGPR = resultTag.gpr(); |
3050 | GPRReg resultPayloadGPR = resultPayload.gpr(); |
3051 | |
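// A boolean's payload is already 0 or 1, so converting it to an int32 only requires
// rewriting the tag; anything that isn't a boolean passes through unchanged.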
3052 | m_jit.move(valuePayloadGPR, resultPayloadGPR); |
3053 | JITCompiler::Jump isBoolean = m_jit.branchIfBoolean(valueTagGPR, InvalidGPRReg); |
3054 | m_jit.move(valueTagGPR, resultTagGPR); |
3055 | JITCompiler::Jump done = m_jit.jump(); |
3056 | isBoolean.link(&m_jit); |
3057 | m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR); |
3058 | done.link(&m_jit); |
3059 | |
3060 | jsValueResult(resultTagGPR, resultPayloadGPR, node); |
3061 | break; |
3062 | } |
3063 | |
3064 | default: |
3065 | RELEASE_ASSERT_NOT_REACHED(); |
3066 | break; |
3067 | } |
3068 | break; |
3069 | } |
3070 | |
3071 | case ToPrimitive: { |
3072 | compileToPrimitive(node); |
3073 | break; |
3074 | } |
3075 | |
3076 | case ToNumber: { |
3077 | JSValueOperand argument(this, node->child1()); |
3078 | GPRTemporary resultTag(this, Reuse, argument, TagWord); |
3079 | GPRTemporary resultPayload(this, Reuse, argument, PayloadWord); |
3080 | |
3081 | GPRReg argumentPayloadGPR = argument.payloadGPR(); |
3082 | GPRReg argumentTagGPR = argument.tagGPR(); |
3083 | JSValueRegs argumentRegs = argument.jsValueRegs(); |
3084 | JSValueRegs resultRegs(resultTag.gpr(), resultPayload.gpr()); |
3085 | |
3086 | argument.use(); |
3087 | |
// There have been several attempts to remove ToNumber, but it still exists, which means
// that converting non-numbers to numbers here is not rare. So instead of using a slow
// path generator, we emit the callOperation inline.
3091 | if (!(m_state.forNode(node->child1()).m_type & SpecBytecodeNumber)) { |
3092 | flushRegisters(); |
3093 | callOperation(operationToNumber, resultRegs, argumentRegs); |
3094 | m_jit.exceptionCheck(); |
3095 | } else { |
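// The value is likely already a number: pass numbers through unchanged and only make the
// call (with registers silently spilled) when branchIfNotNumber fires.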
3096 | MacroAssembler::Jump notNumber; |
3097 | { |
3098 | GPRTemporary scratch(this); |
3099 | notNumber = m_jit.branchIfNotNumber(argument.jsValueRegs(), scratch.gpr()); |
3100 | } |
3101 | m_jit.move(argumentTagGPR, resultRegs.tagGPR()); |
3102 | m_jit.move(argumentPayloadGPR, resultRegs.payloadGPR()); |
3103 | MacroAssembler::Jump done = m_jit.jump(); |
3104 | |
3105 | notNumber.link(&m_jit); |
3106 | silentSpillAllRegisters(resultRegs); |
3107 | callOperation(operationToNumber, resultRegs, argumentRegs); |
3108 | silentFillAllRegisters(); |
3109 | m_jit.exceptionCheck(); |
3110 | |
3111 | done.link(&m_jit); |
3112 | } |
3113 | |
3114 | jsValueResult(resultRegs.tagGPR(), resultRegs.payloadGPR(), node, UseChildrenCalledExplicitly); |
3115 | break; |
3116 | } |
3117 | |
3118 | case ToString: |
3119 | case CallStringConstructor: |
3120 | case StringValueOf: { |
3121 | compileToStringOrCallStringConstructorOrStringValueOf(node); |
3122 | break; |
3123 | } |
3124 | |
3125 | case NewStringObject: { |
3126 | compileNewStringObject(node); |
3127 | break; |
3128 | } |
3129 | |
3130 | case NewSymbol: { |
3131 | compileNewSymbol(node); |
3132 | break; |
3133 | } |
3134 | |
3135 | case NewArray: { |
3136 | compileNewArray(node); |
3137 | break; |
3138 | } |
3139 | |
3140 | case NewArrayWithSpread: { |
3141 | compileNewArrayWithSpread(node); |
3142 | break; |
3143 | } |
3144 | |
3145 | case Spread: { |
3146 | compileSpread(node); |
3147 | break; |
3148 | } |
3149 | |
3150 | case NewArrayWithSize: { |
3151 | compileNewArrayWithSize(node); |
3152 | break; |
3153 | } |
3154 | |
3155 | case NewArrayBuffer: { |
3156 | compileNewArrayBuffer(node); |
3157 | break; |
3158 | } |
3159 | |
3160 | case NewTypedArray: { |
3161 | compileNewTypedArray(node); |
3162 | break; |
3163 | } |
3164 | |
3165 | case NewRegexp: { |
3166 | compileNewRegexp(node); |
3167 | break; |
3168 | } |
3169 | |
3170 | case ToObject: |
3171 | case CallObjectConstructor: { |
3172 | compileToObjectOrCallObjectConstructor(node); |
3173 | break; |
3174 | } |
3175 | |
3176 | case ToThis: { |
3177 | compileToThis(node); |
3178 | break; |
3179 | } |
3180 | |
3181 | case ObjectCreate: { |
3182 | compileObjectCreate(node); |
3183 | break; |
3184 | } |
3185 | |
3186 | case ObjectKeys: { |
3187 | compileObjectKeys(node); |
3188 | break; |
3189 | } |
3190 | |
3191 | case CreateThis: { |
3192 | compileCreateThis(node); |
3193 | break; |
3194 | } |
3195 | |
3196 | case NewObject: { |
3197 | compileNewObject(node); |
3198 | break; |
3199 | } |
3200 | |
3201 | case GetCallee: { |
3202 | compileGetCallee(node); |
3203 | break; |
3204 | } |
3205 | |
3206 | case SetCallee: { |
3207 | compileSetCallee(node); |
3208 | break; |
3209 | } |
3210 | |
3211 | case GetArgumentCountIncludingThis: { |
3212 | compileGetArgumentCountIncludingThis(node); |
3213 | break; |
3214 | } |
3215 | |
3216 | case SetArgumentCountIncludingThis: |
3217 | compileSetArgumentCountIncludingThis(node); |
3218 | break; |
3219 | |
3220 | case GetScope: |
3221 | compileGetScope(node); |
3222 | break; |
3223 | |
3224 | case SkipScope: |
3225 | compileSkipScope(node); |
3226 | break; |
3227 | |
3228 | case GetGlobalObject: |
3229 | compileGetGlobalObject(node); |
3230 | break; |
3231 | |
3232 | case GetGlobalThis: |
3233 | compileGetGlobalThis(node); |
3234 | break; |
3235 | |
3236 | case GetClosureVar: { |
3237 | compileGetClosureVar(node); |
3238 | break; |
3239 | } |
3240 | |
3241 | case PutClosureVar: { |
3242 | compilePutClosureVar(node); |
3243 | break; |
3244 | } |
3245 | |
3246 | case TryGetById: { |
3247 | compileGetById(node, AccessType::TryGet); |
3248 | break; |
3249 | } |
3250 | |
3251 | case GetByIdDirect: { |
3252 | compileGetById(node, AccessType::GetDirect); |
3253 | break; |
3254 | } |
3255 | |
3256 | case GetByIdDirectFlush: { |
3257 | compileGetByIdFlush(node, AccessType::GetDirect); |
3258 | break; |
3259 | } |
3260 | |
3261 | case GetById: { |
3262 | compileGetById(node, AccessType::Get); |
3263 | break; |
3264 | } |
3265 | |
3266 | case GetByIdFlush: { |
3267 | compileGetByIdFlush(node, AccessType::Get); |
3268 | break; |
3269 | } |
3270 | |
3271 | case GetByIdWithThis: { |
3272 | if (node->child1().useKind() == CellUse && node->child2().useKind() == CellUse) { |
3273 | SpeculateCellOperand base(this, node->child1()); |
3274 | SpeculateCellOperand thisValue(this, node->child2()); |
3275 | GPRTemporary resultTag(this); |
3276 | GPRTemporary resultPayload(this); |
3277 | |
3278 | GPRReg baseGPR = base.gpr(); |
3279 | GPRReg thisGPR = thisValue.gpr(); |
3280 | GPRReg resultTagGPR = resultTag.gpr(); |
3281 | GPRReg resultPayloadGPR = resultPayload.gpr(); |
3282 | |
3283 | cachedGetByIdWithThis(node->origin.semantic, InvalidGPRReg, baseGPR, InvalidGPRReg, thisGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber()); |
3284 | |
3285 | jsValueResult(resultTagGPR, resultPayloadGPR, node); |
3286 | } else { |
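// Either operand might not be a cell; collect those cases in notCellList and hand them to
// cachedGetByIdWithThis, which routes them through its slow path.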
3287 | JSValueOperand base(this, node->child1()); |
3288 | JSValueOperand thisValue(this, node->child2()); |
3289 | GPRTemporary resultTag(this); |
3290 | GPRTemporary resultPayload(this); |
3291 | |
3292 | GPRReg baseTagGPR = base.tagGPR(); |
3293 | GPRReg basePayloadGPR = base.payloadGPR(); |
3294 | GPRReg thisTagGPR = thisValue.tagGPR(); |
3295 | GPRReg thisPayloadGPR = thisValue.payloadGPR(); |
3296 | GPRReg resultTagGPR = resultTag.gpr(); |
3297 | GPRReg resultPayloadGPR = resultPayload.gpr(); |
3298 | |
3299 | JITCompiler::JumpList notCellList; |
3300 | notCellList.append(m_jit.branchIfNotCell(base.jsValueRegs())); |
3301 | notCellList.append(m_jit.branchIfNotCell(thisValue.jsValueRegs())); |
3302 | |
3303 | cachedGetByIdWithThis(node->origin.semantic, baseTagGPR, basePayloadGPR, thisTagGPR, thisPayloadGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), notCellList); |
3304 | |
3305 | jsValueResult(resultTagGPR, resultPayloadGPR, node); |
3306 | } |
3307 | |
3308 | break; |
3309 | } |
3310 | |
3311 | case GetArrayLength: |
3312 | compileGetArrayLength(node); |
3313 | break; |
3314 | |
3315 | case DeleteById: { |
3316 | compileDeleteById(node); |
3317 | break; |
3318 | } |
3319 | |
3320 | case DeleteByVal: { |
3321 | compileDeleteByVal(node); |
3322 | break; |
3323 | } |
3324 | |
3325 | case CheckCell: { |
3326 | compileCheckCell(node); |
3327 | break; |
3328 | } |
3329 | |
3330 | case CheckNotEmpty: { |
3331 | compileCheckNotEmpty(node); |
3332 | break; |
3333 | } |
3334 | |
3335 | case CheckStringIdent: |
3336 | compileCheckStringIdent(node); |
3337 | break; |
3338 | |
3339 | case GetExecutable: { |
3340 | compileGetExecutable(node); |
3341 | break; |
3342 | } |
3343 | |
3344 | case CheckStructure: { |
3345 | compileCheckStructure(node); |
3346 | break; |
3347 | } |
3348 | |
3349 | case PutStructure: { |
3350 | RegisteredStructure oldStructure = node->transition()->previous; |
3351 | RegisteredStructure newStructure = node->transition()->next; |
3352 | |
3353 | m_jit.jitCode()->common.notifyCompilingStructureTransition(m_jit.graph().m_plan, m_jit.codeBlock(), node); |
3354 | |
3355 | SpeculateCellOperand base(this, node->child1()); |
3356 | GPRReg baseGPR = base.gpr(); |
3357 | |
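// Only the structure pointer is rewritten here, so the transition must preserve the
// indexing mode and the type info, as the assertions below check.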
3358 | ASSERT_UNUSED(oldStructure, oldStructure->indexingMode() == newStructure->indexingMode()); |
3359 | ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type()); |
3360 | ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags()); |
3361 | m_jit.storePtr(TrustedImmPtr(newStructure), MacroAssembler::Address(baseGPR, JSCell::structureIDOffset())); |
3362 | |
3363 | noResult(node); |
3364 | break; |
3365 | } |
3366 | |
3367 | case AllocatePropertyStorage: |
3368 | compileAllocatePropertyStorage(node); |
3369 | break; |
3370 | |
3371 | case ReallocatePropertyStorage: |
3372 | compileReallocatePropertyStorage(node); |
3373 | break; |
3374 | |
3375 | case NukeStructureAndSetButterfly: |
3376 | compileNukeStructureAndSetButterfly(node); |
3377 | break; |
3378 | |
3379 | case GetButterfly: |
3380 | compileGetButterfly(node); |
3381 | break; |
3382 | |
3383 | case GetIndexedPropertyStorage: { |
3384 | compileGetIndexedPropertyStorage(node); |
3385 | break; |
3386 | } |
3387 | |
3388 | case ConstantStoragePointer: { |
3389 | compileConstantStoragePointer(node); |
3390 | break; |
3391 | } |
3392 | |
3393 | case GetTypedArrayByteOffset: { |
3394 | compileGetTypedArrayByteOffset(node); |
3395 | break; |
3396 | } |
3397 | |
3398 | case GetPrototypeOf: { |
3399 | compileGetPrototypeOf(node); |
3400 | break; |
3401 | } |
3402 | |
3403 | case GetByOffset: { |
3404 | compileGetByOffset(node); |
3405 | break; |
3406 | } |
3407 | |
3408 | case GetGetterSetterByOffset: { |
3409 | StorageOperand storage(this, node->child1()); |
3410 | GPRTemporary resultPayload(this); |
3411 | |
3412 | GPRReg storageGPR = storage.gpr(); |
3413 | GPRReg resultPayloadGPR = resultPayload.gpr(); |
3414 | |
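// A GetterSetter is always a cell, so only the payload word of the slot needs to be loaded.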
3415 | StorageAccessData& storageAccessData = node->storageAccessData(); |
3416 | |
3417 | m_jit.load32(JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR); |
3418 | |
3419 | cellResult(resultPayloadGPR, node); |
3420 | break; |
3421 | } |
3422 | |
3423 | case MatchStructure: { |
3424 | compileMatchStructure(node); |
3425 | break; |
3426 | } |
3427 | |
3428 | case GetGetter: { |
3429 | compileGetGetter(node); |
3430 | break; |
3431 | } |
3432 | |
3433 | case GetSetter: { |
3434 | compileGetSetter(node); |
3435 | break; |
3436 | } |
3437 | |
3438 | case PutByOffset: { |
3439 | compilePutByOffset(node); |
3440 | break; |
3441 | } |
3442 | |
3443 | case PutByIdFlush: { |
3444 | compilePutByIdFlush(node); |
3445 | break; |
3446 | } |
3447 | |
3448 | case PutById: { |
3449 | compilePutById(node); |
3450 | break; |
3451 | } |
3452 | |
3453 | case PutByIdDirect: { |
3454 | compilePutByIdDirect(node); |
3455 | break; |
3456 | } |
3457 | |
3458 | case PutByIdWithThis: { |
3459 | compilePutByIdWithThis(node); |
3460 | break; |
3461 | } |
3462 | |
3463 | case PutGetterById: |
3464 | case PutSetterById: { |
3465 | compilePutAccessorById(node); |
3466 | break; |
3467 | } |
3468 | |
3469 | case PutGetterSetterById: { |
3470 | compilePutGetterSetterById(node); |
3471 | break; |
3472 | } |
3473 | |
3474 | case PutGetterByVal: |
3475 | case PutSetterByVal: { |
3476 | compilePutAccessorByVal(node); |
3477 | break; |
3478 | } |
3479 | |
3480 | case DefineDataProperty: { |
3481 | compileDefineDataProperty(node); |
3482 | break; |
3483 | } |
3484 | |
3485 | case DefineAccessorProperty: { |
3486 | compileDefineAccessorProperty(node); |
3487 | break; |
3488 | } |
3489 | |
3490 | case GetGlobalLexicalVariable: |
3491 | case GetGlobalVar: { |
3492 | compileGetGlobalVariable(node); |
3493 | break; |
3494 | } |
3495 | |
3496 | case PutGlobalVariable: { |
3497 | compilePutGlobalVariable(node); |
3498 | break; |
3499 | } |
3500 | |
3501 | case NotifyWrite: { |
3502 | compileNotifyWrite(node); |
3503 | break; |
3504 | } |
3505 | |
3506 | case ParseInt: { |
3507 | compileParseInt(node); |
3508 | break; |
3509 | } |
3510 | |
3511 | case CheckTypeInfoFlags: { |
3512 | compileCheckTypeInfoFlags(node); |
3513 | break; |
3514 | } |
3515 | |
3516 | case OverridesHasInstance: { |
3517 | compileOverridesHasInstance(node); |
3518 | break; |
3519 | } |
3520 | |
3521 | case InstanceOf: { |
3522 | compileInstanceOf(node); |
3523 | break; |
3524 | } |
3525 | |
3526 | case InstanceOfCustom: { |
3527 | compileInstanceOfCustom(node); |
3528 | break; |
3529 | } |
3530 | |
3531 | case IsEmpty: { |
3532 | JSValueOperand value(this, node->child1()); |
3533 | GPRTemporary result(this, Reuse, value, TagWord); |
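// On 32-bit, an empty JSValue is identified by its tag alone.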
3534 | m_jit.comparePtr(JITCompiler::Equal, value.tagGPR(), TrustedImm32(JSValue::EmptyValueTag), result.gpr()); |
3535 | booleanResult(result.gpr(), node); |
3536 | break; |
3537 | } |
3538 | |
3539 | case IsUndefined: { |
3540 | JSValueOperand value(this, node->child1()); |
3541 | GPRTemporary result(this); |
3542 | GPRTemporary localGlobalObject(this); |
3543 | GPRTemporary remoteGlobalObject(this); |
3544 | |
3545 | JITCompiler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs()); |
3546 | |
3547 | m_jit.compare32(JITCompiler::Equal, value.tagGPR(), TrustedImm32(JSValue::UndefinedTag), result.gpr()); |
3548 | JITCompiler::Jump done = m_jit.jump(); |
3549 | |
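// For cells the answer is false unless the object masquerades as undefined in this global
// object; that check is skipped entirely while the watchpoint is still valid.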
3550 | isCell.link(&m_jit); |
3551 | JITCompiler::Jump notMasqueradesAsUndefined; |
3552 | if (masqueradesAsUndefinedWatchpointIsStillValid()) { |
3553 | m_jit.move(TrustedImm32(0), result.gpr()); |
3554 | notMasqueradesAsUndefined = m_jit.jump(); |
3555 | } else { |
3556 | JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8( |
3557 | JITCompiler::NonZero, |
3558 | JITCompiler::Address(value.payloadGPR(), JSCell::typeInfoFlagsOffset()), |
3559 | TrustedImm32(MasqueradesAsUndefined)); |
3560 | m_jit.move(TrustedImm32(0), result.gpr()); |
3561 | notMasqueradesAsUndefined = m_jit.jump(); |
3562 | |
3563 | isMasqueradesAsUndefined.link(&m_jit); |
3564 | GPRReg localGlobalObjectGPR = localGlobalObject.gpr(); |
3565 | GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr(); |
3566 | m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.globalObjectFor(node->origin.semantic)), localGlobalObjectGPR); |
3567 | m_jit.loadPtr(JITCompiler::Address(value.payloadGPR(), JSCell::structureIDOffset()), result.gpr()); |
3568 | m_jit.loadPtr(JITCompiler::Address(result.gpr(), Structure::globalObjectOffset()), remoteGlobalObjectGPR); |
3569 | m_jit.compare32(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, result.gpr()); |
3570 | } |
3571 | |
3572 | notMasqueradesAsUndefined.link(&m_jit); |
3573 | done.link(&m_jit); |
3574 | booleanResult(result.gpr(), node); |
3575 | break; |
3576 | } |
3577 | |
3578 | case IsUndefinedOrNull: { |
3579 | JSValueOperand value(this, node->child1()); |
3580 | GPRTemporary result(this, Reuse, value, TagWord); |
3581 | |
3582 | GPRReg valueTagGPR = value.tagGPR(); |
3583 | GPRReg resultGPR = result.gpr(); |
3584 | |
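// UndefinedTag and NullTag differ only in their low bit (see the static_assert below), so
// OR-ing in 1 folds both onto NullTag and a single compare suffices.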
3585 | m_jit.move(valueTagGPR, resultGPR); |
static_assert((JSValue::UndefinedTag + 1 == JSValue::NullTag) && (JSValue::NullTag & 0x1), "");
3587 | m_jit.or32(CCallHelpers::TrustedImm32(1), resultGPR); |
3588 | m_jit.compare32(CCallHelpers::Equal, resultGPR, CCallHelpers::TrustedImm32(JSValue::NullTag), resultGPR); |
3589 | |
3590 | booleanResult(resultGPR, node); |
3591 | break; |
3592 | } |
3593 | |
3594 | |
3595 | case IsBoolean: { |
3596 | JSValueOperand value(this, node->child1()); |
3597 | GPRTemporary result(this, Reuse, value, TagWord); |
3598 | |
3599 | m_jit.compare32(JITCompiler::Equal, value.tagGPR(), JITCompiler::TrustedImm32(JSValue::BooleanTag), result.gpr()); |
3600 | booleanResult(result.gpr(), node); |
3601 | break; |
3602 | } |
3603 | |
3604 | case IsNumber: { |
3605 | JSValueOperand value(this, node->child1()); |
3606 | GPRTemporary result(this, Reuse, value, TagWord); |
3607 | |
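// Adding 1 wraps Int32Tag (the largest tag) around to zero while every double tag stays at
// or below LowestTag, so one unsigned compare against LowestTag + 1 answers "is this an
// int32 or a double?".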
3608 | m_jit.add32(TrustedImm32(1), value.tagGPR(), result.gpr()); |
3609 | m_jit.compare32(JITCompiler::Below, result.gpr(), JITCompiler::TrustedImm32(JSValue::LowestTag + 1), result.gpr()); |
3610 | booleanResult(result.gpr(), node); |
3611 | break; |
3612 | } |
3613 | |
3614 | case NumberIsInteger: { |
3615 | JSValueOperand input(this, node->child1()); |
3616 | JSValueRegs inputRegs = input.jsValueRegs(); |
3617 | flushRegisters(); |
3618 | GPRFlushedCallResult result(this); |
3619 | GPRReg resultGPR = result.gpr(); |
3620 | callOperation(operationNumberIsInteger, resultGPR, inputRegs); |
3621 | booleanResult(resultGPR, node); |
3622 | break; |
3623 | } |
3624 | |
3625 | case IsObject: { |
3626 | compileIsObject(node); |
3627 | break; |
3628 | } |
3629 | |
3630 | case IsObjectOrNull: { |
3631 | compileIsObjectOrNull(node); |
3632 | break; |
3633 | } |
3634 | |
3635 | case IsFunction: { |
3636 | compileIsFunction(node); |
3637 | break; |
3638 | } |
3639 | |
3640 | case IsCellWithType: { |
3641 | compileIsCellWithType(node); |
3642 | break; |
3643 | } |
3644 | |
3645 | case IsTypedArrayView: { |
3646 | compileIsTypedArrayView(node); |
3647 | break; |
3648 | } |
3649 | |
3650 | case TypeOf: { |
3651 | compileTypeOf(node); |
3652 | break; |
3653 | } |
3654 | |
3655 | case MapHash: { |
3656 | JSValueOperand input(this, node->child1()); |
3657 | |
3658 | JSValueRegs inputRegs = input.jsValueRegs(); |
3659 | |
3660 | flushRegisters(); |
3661 | GPRFlushedCallResult result(this); |
3662 | GPRReg resultGPR = result.gpr(); |
3663 | callOperation(operationMapHash, resultGPR, inputRegs); |
3664 | m_jit.exceptionCheck(); |
3665 | int32Result(resultGPR, node); |
3666 | break; |
3667 | } |
3668 | |
3669 | case NormalizeMapKey: { |
3670 | compileNormalizeMapKey(node); |
3671 | break; |
3672 | } |
3673 | |
3674 | case GetMapBucket: { |
3675 | SpeculateCellOperand map(this, node->child1()); |
3676 | JSValueOperand key(this, node->child2()); |
3677 | SpeculateInt32Operand hash(this, node->child3()); |
3678 | |
3679 | GPRReg mapGPR = map.gpr(); |
3680 | JSValueRegs keyRegs = key.jsValueRegs(); |
3681 | GPRReg hashGPR = hash.gpr(); |
3682 | |
3683 | if (node->child1().useKind() == MapObjectUse) |
3684 | speculateMapObject(node->child1(), mapGPR); |
3685 | else if (node->child1().useKind() == SetObjectUse) |
3686 | speculateSetObject(node->child1(), mapGPR); |
3687 | else |
3688 | RELEASE_ASSERT_NOT_REACHED(); |
3689 | |
3690 | flushRegisters(); |
3691 | GPRFlushedCallResult result(this); |
3692 | GPRReg resultGPR = result.gpr(); |
3693 | if (node->child1().useKind() == MapObjectUse) |
3694 | callOperation(operationJSMapFindBucket, resultGPR, mapGPR, keyRegs, hashGPR); |
3695 | else |
3696 | callOperation(operationJSSetFindBucket, resultGPR, mapGPR, keyRegs, hashGPR); |
3697 | m_jit.exceptionCheck(); |
3698 | cellResult(resultGPR, node); |
3699 | break; |
3700 | } |
3701 | |
3702 | case GetMapBucketHead: |
3703 | compileGetMapBucketHead(node); |
3704 | break; |
3705 | |
3706 | case GetMapBucketNext: |
3707 | compileGetMapBucketNext(node); |
3708 | break; |
3709 | |
3710 | case LoadKeyFromMapBucket: |
3711 | compileLoadKeyFromMapBucket(node); |
3712 | break; |
3713 | |
3714 | case LoadValueFromMapBucket: |
3715 | compileLoadValueFromMapBucket(node); |
3716 | break; |
3717 | |
3718 | case ExtractValueFromWeakMapGet: |
3719 | compileExtractValueFromWeakMapGet(node); |
3720 | break; |
3721 | |
3722 | case SetAdd: |
3723 | compileSetAdd(node); |
3724 | break; |
3725 | |
3726 | case MapSet: |
3727 | compileMapSet(node); |
3728 | break; |
3729 | |
3730 | case WeakMapGet: |
3731 | compileWeakMapGet(node); |
3732 | break; |
3733 | |
3734 | case WeakSetAdd: |
3735 | compileWeakSetAdd(node); |
3736 | break; |
3737 | |
3738 | case WeakMapSet: |
3739 | compileWeakMapSet(node); |
3740 | break; |
3741 | |
3742 | case Flush: |
3743 | break; |
3744 | |
3745 | case Call: |
3746 | case TailCall: |
3747 | case TailCallInlinedCaller: |
3748 | case Construct: |
3749 | case CallVarargs: |
3750 | case TailCallVarargs: |
3751 | case TailCallVarargsInlinedCaller: |
3752 | case ConstructVarargs: |
3753 | case CallForwardVarargs: |
3754 | case TailCallForwardVarargs: |
3755 | case TailCallForwardVarargsInlinedCaller: |
3756 | case ConstructForwardVarargs: |
3757 | case CallEval: |
3758 | case DirectCall: |
3759 | case DirectConstruct: |
3760 | case DirectTailCall: |
3761 | case DirectTailCallInlinedCaller: |
3762 | emitCall(node); |
3763 | break; |
3764 | |
3765 | case LoadVarargs: { |
3766 | compileLoadVarargs(node); |
3767 | break; |
3768 | } |
3769 | |
3770 | case ForwardVarargs: { |
3771 | compileForwardVarargs(node); |
3772 | break; |
3773 | } |
3774 | |
3775 | case CreateActivation: { |
3776 | compileCreateActivation(node); |
3777 | break; |
3778 | } |
3779 | |
3780 | case PushWithScope: { |
3781 | compilePushWithScope(node); |
3782 | break; |
3783 | } |
3784 | |
3785 | case CreateDirectArguments: { |
3786 | compileCreateDirectArguments(node); |
3787 | break; |
3788 | } |
3789 | |
3790 | case GetFromArguments: { |
3791 | compileGetFromArguments(node); |
3792 | break; |
3793 | } |
3794 | |
3795 | case PutToArguments: { |
3796 | compilePutToArguments(node); |
3797 | break; |
3798 | } |
3799 | |
3800 | case GetArgument: { |
3801 | compileGetArgument(node); |
3802 | break; |
3803 | } |
3804 | |
3805 | case CreateScopedArguments: { |
3806 | compileCreateScopedArguments(node); |
3807 | break; |
3808 | } |
3809 | |
3810 | case CreateClonedArguments: { |
3811 | compileCreateClonedArguments(node); |
3812 | break; |
3813 | } |
3814 | |
3815 | case CreateRest: { |
3816 | compileCreateRest(node); |
3817 | break; |
3818 | } |
3819 | |
3820 | case GetRestLength: { |
3821 | compileGetRestLength(node); |
3822 | break; |
3823 | } |
3824 | |
3825 | case NewFunction: |
3826 | case NewGeneratorFunction: |
3827 | case NewAsyncFunction: |
3828 | case NewAsyncGeneratorFunction: |
3829 | compileNewFunction(node); |
3830 | break; |
3831 | |
3832 | case SetFunctionName: |
3833 | compileSetFunctionName(node); |
3834 | break; |
3835 | |
3836 | case InById: |
3837 | compileInById(node); |
3838 | break; |
3839 | |
3840 | case InByVal: |
3841 | compileInByVal(node); |
3842 | break; |
3843 | |
3844 | case HasOwnProperty: { |
3845 | #if CPU(X86) |
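// On 32-bit x86 we skip the inline HasOwnPropertyCache probe used below (likely due to
// register pressure) and just call the C helper.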
3846 | ASSERT(node->child2().useKind() == UntypedUse); |
3847 | SpeculateCellOperand object(this, node->child1()); |
3848 | JSValueOperand key(this, node->child2()); |
3849 | GPRTemporary result(this, Reuse, object); |
3850 | |
3851 | JSValueRegs keyRegs = key.jsValueRegs(); |
3852 | GPRReg objectGPR = object.gpr(); |
3853 | GPRReg resultGPR = result.gpr(); |
3854 | |
3855 | speculateObject(node->child1()); |
3856 | |
3857 | flushRegisters(); |
3858 | callOperation(operationHasOwnProperty, resultGPR, objectGPR, keyRegs); |
3859 | booleanResult(resultGPR, node); |
3860 | #else |
3861 | SpeculateCellOperand object(this, node->child1()); |
3862 | GPRTemporary uniquedStringImpl(this); |
3863 | GPRTemporary temp(this); |
3864 | GPRTemporary hash(this); |
3865 | GPRTemporary structureID(this); |
3866 | GPRTemporary result(this); |
3867 | |
3868 | Optional<SpeculateCellOperand> keyAsCell; |
3869 | Optional<JSValueOperand> keyAsValue; |
3870 | JSValueRegs keyRegs; |
3871 | if (node->child2().useKind() == UntypedUse) { |
3872 | keyAsValue.emplace(this, node->child2()); |
3873 | keyRegs = keyAsValue->jsValueRegs(); |
3874 | } else { |
3875 | ASSERT(node->child2().useKind() == StringUse || node->child2().useKind() == SymbolUse); |
3876 | keyAsCell.emplace(this, node->child2()); |
3877 | keyRegs = JSValueRegs::payloadOnly(keyAsCell->gpr()); |
3878 | } |
3879 | |
3880 | GPRReg objectGPR = object.gpr(); |
3881 | GPRReg implGPR = uniquedStringImpl.gpr(); |
3882 | GPRReg tempGPR = temp.gpr(); |
3883 | GPRReg hashGPR = hash.gpr(); |
3884 | GPRReg structureIDGPR = structureID.gpr(); |
3885 | GPRReg resultGPR = result.gpr(); |
3886 | |
3887 | speculateObject(node->child1()); |
3888 | |
3889 | MacroAssembler::JumpList slowPath; |
3890 | switch (node->child2().useKind()) { |
3891 | case SymbolUse: { |
3892 | speculateSymbol(node->child2(), keyRegs.payloadGPR()); |
3893 | m_jit.loadPtr(MacroAssembler::Address(keyRegs.payloadGPR(), Symbol::offsetOfSymbolImpl()), implGPR); |
3894 | break; |
3895 | } |
3896 | case StringUse: { |
3897 | speculateString(node->child2(), keyRegs.payloadGPR()); |
3898 | m_jit.loadPtr(MacroAssembler::Address(keyRegs.payloadGPR(), JSString::offsetOfValue()), implGPR); |
3899 | slowPath.append(m_jit.branchIfRopeStringImpl(implGPR)); |
3900 | slowPath.append(m_jit.branchTest32( |
3901 | MacroAssembler::Zero, MacroAssembler::Address(implGPR, StringImpl::flagsOffset()), |
3902 | MacroAssembler::TrustedImm32(StringImpl::flagIsAtom()))); |
3903 | break; |
3904 | } |
3905 | case UntypedUse: { |
3906 | slowPath.append(m_jit.branchIfNotCell(keyRegs)); |
3907 | auto isNotString = m_jit.branchIfNotString(keyRegs.payloadGPR()); |
3908 | m_jit.loadPtr(MacroAssembler::Address(keyRegs.payloadGPR(), JSString::offsetOfValue()), implGPR); |
3909 | slowPath.append(m_jit.branchIfRopeStringImpl(implGPR)); |
3910 | slowPath.append(m_jit.branchTest32( |
3911 | MacroAssembler::Zero, MacroAssembler::Address(implGPR, StringImpl::flagsOffset()), |
3912 | MacroAssembler::TrustedImm32(StringImpl::flagIsAtom()))); |
3913 | auto hasUniquedImpl = m_jit.jump(); |
3914 | |
3915 | isNotString.link(&m_jit); |
3916 | slowPath.append(m_jit.branchIfNotSymbol(keyRegs.payloadGPR())); |
3917 | m_jit.loadPtr(MacroAssembler::Address(keyRegs.payloadGPR(), Symbol::offsetOfSymbolImpl()), implGPR); |
3918 | |
3919 | hasUniquedImpl.link(&m_jit); |
3920 | break; |
3921 | } |
3922 | default: |
3923 | RELEASE_ASSERT_NOT_REACHED(); |
3924 | } |
3925 | |
// Note that we don't test whether the hash is zero here. An AtomStringImpl can't have a zero
// hash, but a SymbolImpl can. Because this is a cache, though, we don't care: we only ever
// load the result from the cache if the cache entry matches what we are querying for. So
// either we get super lucky, use zero for the hash, and somehow collide with the entity
// we're looking for, or we realize we're comparing against another entity and go to the
// slow path anyway.
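// The string's hash lives in the upper bits of its flags word, so shift it down, mix in the
// structure ID, mask to the cache size, and scale by the entry size to get the cache index.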
3932 | m_jit.load32(MacroAssembler::Address(implGPR, UniquedStringImpl::flagsOffset()), hashGPR); |
3933 | m_jit.urshift32(MacroAssembler::TrustedImm32(StringImpl::s_flagCount), hashGPR); |
3934 | m_jit.load32(MacroAssembler::Address(objectGPR, JSCell::structureIDOffset()), structureIDGPR); |
3935 | m_jit.add32(structureIDGPR, hashGPR); |
3936 | m_jit.and32(TrustedImm32(HasOwnPropertyCache::mask), hashGPR); |
3937 | m_jit.mul32(TrustedImm32(sizeof(HasOwnPropertyCache::Entry)), hashGPR, hashGPR); |
3938 | ASSERT(m_jit.vm()->hasOwnPropertyCache()); |
3939 | m_jit.move(TrustedImmPtr(m_jit.vm()->hasOwnPropertyCache()), tempGPR); |
3940 | slowPath.append(m_jit.branchPtr(MacroAssembler::NotEqual, |
3941 | MacroAssembler::BaseIndex(tempGPR, hashGPR, MacroAssembler::TimesOne, HasOwnPropertyCache::Entry::offsetOfImpl()), implGPR)); |
3942 | m_jit.load8(MacroAssembler::BaseIndex(tempGPR, hashGPR, MacroAssembler::TimesOne, HasOwnPropertyCache::Entry::offsetOfResult()), resultGPR); |
3943 | m_jit.load32(MacroAssembler::BaseIndex(tempGPR, hashGPR, MacroAssembler::TimesOne, HasOwnPropertyCache::Entry::offsetOfStructureID()), tempGPR); |
3944 | slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, tempGPR, structureIDGPR)); |
3945 | auto done = m_jit.jump(); |
3946 | |
3947 | slowPath.link(&m_jit); |
3948 | silentSpillAllRegisters(resultGPR); |
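// For String/Symbol keys only the payload is in a register; materialize a full cell
// JSValue (CellTag + payload) before making the C call.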
3949 | if (node->child2().useKind() != UntypedUse) { |
3950 | m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag), tempGPR); |
3951 | keyRegs = JSValueRegs(tempGPR, keyRegs.payloadGPR()); |
3952 | } |
3953 | callOperation(operationHasOwnProperty, resultGPR, objectGPR, keyRegs); |
3954 | silentFillAllRegisters(); |
3955 | m_jit.exceptionCheck(); |
3956 | |
3957 | done.link(&m_jit); |
3958 | booleanResult(resultGPR, node); |
3959 | #endif // CPU(X86) |
3960 | break; |
3961 | } |
3962 | |
3963 | case StoreBarrier: |
3964 | case FencedStoreBarrier: { |
3965 | compileStoreBarrier(node); |
3966 | break; |
3967 | } |
3968 | |
3969 | case GetEnumerableLength: { |
3970 | compileGetEnumerableLength(node); |
3971 | break; |
3972 | } |
3973 | case HasGenericProperty: { |
3974 | compileHasGenericProperty(node); |
3975 | break; |
3976 | } |
3977 | case HasStructureProperty: { |
3978 | compileHasStructureProperty(node); |
3979 | break; |
3980 | } |
3981 | case HasIndexedProperty: { |
3982 | compileHasIndexedProperty(node); |
3983 | break; |
3984 | } |
3985 | case GetDirectPname: { |
3986 | compileGetDirectPname(node); |
3987 | break; |
3988 | } |
3989 | case GetPropertyEnumerator: { |
3990 | compileGetPropertyEnumerator(node); |
3991 | break; |
3992 | } |
3993 | case GetEnumeratorStructurePname: |
3994 | case GetEnumeratorGenericPname: { |
3995 | compileGetEnumeratorPname(node); |
3996 | break; |
3997 | } |
3998 | case ToIndexString: { |
3999 | compileToIndexString(node); |
4000 | break; |
4001 | } |
4002 | case ProfileType: { |
4003 | compileProfileType(node); |
4004 | break; |
4005 | } |
4006 | case ProfileControlFlow: { |
4007 | GPRTemporary scratch1(this); |
4008 | BasicBlockLocation* basicBlockLocation = node->basicBlockLocation(); |
4009 | basicBlockLocation->emitExecuteCode(m_jit, scratch1.gpr()); |
4010 | noResult(node); |
4011 | break; |
4012 | } |
4013 | |
4014 | case LogShadowChickenPrologue: { |
4015 | compileLogShadowChickenPrologue(node); |
4016 | break; |
4017 | } |
4018 | |
4019 | case LogShadowChickenTail: { |
4020 | compileLogShadowChickenTail(node); |
4021 | break; |
4022 | } |
4023 | |
4024 | case ForceOSRExit: { |
4025 | terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); |
4026 | break; |
4027 | } |
4028 | |
4029 | case InvalidationPoint: |
4030 | emitInvalidationPoint(node); |
4031 | break; |
4032 | |
4033 | case CheckTraps: |
4034 | compileCheckTraps(node); |
4035 | break; |
4036 | |
4037 | case CountExecution: |
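// The execution counter is 64 bits wide even on 32-bit platforms, hence add64.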
4038 | m_jit.add64(TrustedImm32(1), MacroAssembler::AbsoluteAddress(node->executionCounter()->address())); |
4039 | break; |
4040 | |
4041 | case SuperSamplerBegin: |
4042 | m_jit.add32(TrustedImm32(1), MacroAssembler::AbsoluteAddress(bitwise_cast<void*>(&g_superSamplerCount))); |
4043 | break; |
4044 | |
4045 | case SuperSamplerEnd: |
4046 | m_jit.sub32(TrustedImm32(1), MacroAssembler::AbsoluteAddress(bitwise_cast<void*>(&g_superSamplerCount))); |
4047 | break; |
4048 | |
4049 | case Phantom: |
4050 | case Check: |
4051 | case CheckVarargs: |
4052 | DFG_NODE_DO_TO_CHILDREN(m_jit.graph(), node, speculate); |
4053 | noResult(node); |
4054 | break; |
4055 | |
4056 | case PhantomLocal: |
4057 | case LoopHint: |
4058 | // This is a no-op. |
4059 | noResult(node); |
4060 | break; |
4061 | |
4062 | case MaterializeNewObject: |
4063 | compileMaterializeNewObject(node); |
4064 | break; |
4065 | |
4066 | case PutDynamicVar: { |
4067 | compilePutDynamicVar(node); |
4068 | break; |
4069 | } |
4070 | |
4071 | case GetDynamicVar: { |
4072 | compileGetDynamicVar(node); |
4073 | break; |
4074 | } |
4075 | |
4076 | case ResolveScopeForHoistingFuncDeclInEval: { |
4077 | compileResolveScopeForHoistingFuncDeclInEval(node); |
4078 | break; |
4079 | } |
4080 | |
4081 | case ResolveScope: { |
4082 | compileResolveScope(node); |
4083 | break; |
4084 | } |
4085 | |
4086 | case CallDOM: |
4087 | compileCallDOM(node); |
4088 | break; |
4089 | |
4090 | case CallDOMGetter: |
4091 | compileCallDOMGetter(node); |
4092 | break; |
4093 | |
4094 | case CheckSubClass: |
4095 | compileCheckSubClass(node); |
4096 | break; |
4097 | |
4098 | case Unreachable: |
4099 | unreachable(node); |
4100 | break; |
4101 | |
4102 | case ExtractCatchLocal: { |
4103 | compileExtractCatchLocal(node); |
4104 | break; |
4105 | } |
4106 | |
4107 | case ClearCatchLocals: |
4108 | compileClearCatchLocals(node); |
4109 | break; |
4110 | |
4111 | case CheckStructureOrEmpty: |
DFG_CRASH(m_jit.graph(), node, "CheckStructureOrEmpty only used in 64-bit DFG");
4113 | break; |
4114 | |
4115 | case FilterCallLinkStatus: |
4116 | case FilterGetByIdStatus: |
4117 | case FilterPutByIdStatus: |
4118 | case FilterInByIdStatus: |
4119 | m_interpreter.filterICStatus(node); |
4120 | noResult(node); |
4121 | break; |
4122 | |
4123 | case LastNodeType: |
4124 | case Phi: |
4125 | case Upsilon: |
4126 | case ExtractOSREntryLocal: |
4127 | case CheckTierUpInLoop: |
4128 | case CheckTierUpAtReturn: |
4129 | case CheckTierUpAndOSREnter: |
4130 | case Int52Rep: |
4131 | case FiatInt52: |
4132 | case Int52Constant: |
4133 | case CheckInBounds: |
4134 | case ArithIMul: |
4135 | case MultiGetByOffset: |
4136 | case MultiPutByOffset: |
4137 | case CheckBadCell: |
4138 | case BottomValue: |
4139 | case PhantomNewObject: |
4140 | case PhantomNewFunction: |
4141 | case PhantomNewGeneratorFunction: |
4142 | case PhantomNewAsyncFunction: |
4143 | case PhantomNewAsyncGeneratorFunction: |
4144 | case PhantomCreateActivation: |
4145 | case PhantomNewRegexp: |
4146 | case PutHint: |
4147 | case CheckStructureImmediate: |
4148 | case MaterializeCreateActivation: |
4149 | case PutStack: |
4150 | case KillStack: |
4151 | case GetStack: |
4152 | case GetMyArgumentByVal: |
4153 | case GetMyArgumentByValOutOfBounds: |
4154 | case GetVectorLength: |
4155 | case PhantomCreateRest: |
4156 | case PhantomSpread: |
4157 | case PhantomNewArrayWithSpread: |
4158 | case PhantomNewArrayBuffer: |
4159 | case AtomicsIsLockFree: |
4160 | case AtomicsAdd: |
4161 | case AtomicsAnd: |
4162 | case AtomicsCompareExchange: |
4163 | case AtomicsExchange: |
4164 | case AtomicsLoad: |
4165 | case AtomicsOr: |
4166 | case AtomicsStore: |
4167 | case AtomicsSub: |
4168 | case AtomicsXor: |
4169 | case IdentityWithProfile: |
4170 | case InitializeEntrypointArguments: |
4171 | case EntrySwitch: |
4172 | case CPUIntrinsic: |
4173 | case AssertNotEmpty: |
4174 | case DataViewGetInt: |
4175 | case DataViewGetFloat: |
4176 | case DataViewSet: |
DFG_CRASH(m_jit.graph(), node, "unexpected node in DFG backend");
4178 | break; |
4179 | } |
4180 | |
4181 | if (!m_compileOkay) |
4182 | return; |
4183 | |
4184 | if (node->hasResult() && node->mustGenerate()) |
4185 | use(node); |
4186 | } |
4187 | |
4188 | void SpeculativeJIT::moveTrueTo(GPRReg gpr) |
4189 | { |
4190 | m_jit.move(TrustedImm32(1), gpr); |
4191 | } |
4192 | |
4193 | void SpeculativeJIT::moveFalseTo(GPRReg gpr) |
4194 | { |
4195 | m_jit.move(TrustedImm32(0), gpr); |
4196 | } |
4197 | |
4198 | void SpeculativeJIT::blessBoolean(GPRReg) |
4199 | { |
4200 | } |
4201 | |
4202 | void SpeculativeJIT::compileArithRandom(Node* node) |
4203 | { |
4204 | JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic); |
4205 | |
4206 | flushRegisters(); |
4207 | |
4208 | FPRResult result(this); |
4209 | callOperation(operationRandom, result.fpr(), globalObject); |
4210 | // operationRandom does not raise any exception. |
4211 | doubleResult(result.fpr(), node); |
4212 | } |
4213 | |
#endif // USE(JSVALUE32_64)
4215 | |
4216 | } } // namespace JSC::DFG |
4217 | |
#endif // ENABLE(DFG_JIT)
4219 | |