1 | /* |
2 | * Copyright (C) 2008-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | |
28 | #if ENABLE(JIT) |
29 | #include "JIT.h" |
30 | |
31 | #include "CodeBlock.h" |
32 | #include "DirectArguments.h" |
33 | #include "GCAwareJITStubRoutine.h" |
34 | #include "GetterSetter.h" |
35 | #include "InterpreterInlines.h" |
36 | #include "JITInlines.h" |
37 | #include "JSArray.h" |
38 | #include "JSFunction.h" |
39 | #include "JSLexicalEnvironment.h" |
40 | #include "LinkBuffer.h" |
41 | #include "OpcodeInlines.h" |
42 | #include "ResultType.h" |
43 | #include "ScopedArguments.h" |
44 | #include "ScopedArgumentsTable.h" |
45 | #include "SlowPathCall.h" |
46 | #include "StructureStubInfo.h" |
47 | #include "ThunkGenerators.h" |
48 | #include <wtf/ScopedLambda.h> |
49 | #include <wtf/StringPrintStream.h> |
50 | |
51 | |
52 | namespace JSC { |
53 | #if USE(JSVALUE64) |
54 | |
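// Baseline fast path for get_by_val: the base cell ends up in regT0 and the property index in
// regT1 (loaded from a constant or a virtual register). The array profile picks one indexing
// shape to speculate on; cell, int32, shape, bounds, and hole failures all become slow cases,
// and the ByValCompilationInfo recorded at the end lets this site be repatched later with a
// specialized or cached-id stub.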
55 | void JIT::emit_op_get_by_val(const Instruction* currentInstruction) |
56 | { |
57 | auto bytecode = currentInstruction->as<OpGetByVal>(); |
58 | auto& metadata = bytecode.metadata(m_codeBlock); |
59 | int dst = bytecode.m_dst.offset(); |
60 | int base = bytecode.m_base.offset(); |
61 | int property = bytecode.m_property.offset(); |
62 | ArrayProfile* profile = &metadata.m_arrayProfile; |
63 | ByValInfo* byValInfo = m_codeBlock->addByValInfo(); |
64 | |
65 | emitGetVirtualRegister(base, regT0); |
66 | bool propertyNameIsIntegerConstant = isOperandConstantInt(property); |
67 | if (propertyNameIsIntegerConstant) |
68 | move(Imm32(getOperandConstantInt(property)), regT1); |
69 | else |
70 | emitGetVirtualRegister(property, regT1); |
71 | |
72 | emitJumpSlowCaseIfNotJSCell(regT0, base); |
73 | |
74 | PatchableJump notIndex; |
75 | if (!propertyNameIsIntegerConstant) { |
76 | notIndex = emitPatchableJumpIfNotInt(regT1); |
77 | addSlowCase(notIndex); |
78 | |
79 | // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter. |
80 | // We check the value as if it were a uint32 against m_vectorLength - which will always fail if the |
81 | // number was negative, since m_vectorLength is always less than INT_MAX (the total allocation |
82 | // size is always less than 4GB). As such, zero-extending will have been correct (and extending the |
83 | // value to 64 bits is necessary since it's used in the address calculation). We zero-extend rather |
84 | // than sign-extend since it makes it easier to re-tag the value in the slow case. |
85 | zeroExtend32ToPtr(regT1, regT1); |
86 | } |
87 | |
88 | emitArrayProfilingSiteWithCell(regT0, regT2, profile); |
89 | and32(TrustedImm32(IndexingShapeMask), regT2); |
90 | |
91 | PatchableJump badType; |
92 | JumpList slowCases; |
93 | |
94 | JITArrayMode mode = chooseArrayMode(profile); |
95 | switch (mode) { |
96 | case JITInt32: |
97 | slowCases = emitInt32GetByVal(currentInstruction, badType); |
98 | break; |
99 | case JITDouble: |
100 | slowCases = emitDoubleGetByVal(currentInstruction, badType); |
101 | break; |
102 | case JITContiguous: |
103 | slowCases = emitContiguousGetByVal(currentInstruction, badType); |
104 | break; |
105 | case JITArrayStorage: |
106 | slowCases = emitArrayStorageGetByVal(currentInstruction, badType); |
107 | break; |
108 | default: |
109 | CRASH(); |
110 | break; |
111 | } |
112 | |
113 | addSlowCase(badType); |
114 | addSlowCase(slowCases); |
115 | |
116 | Label done = label(); |
117 | |
118 | if (!ASSERT_DISABLED) { |
119 | Jump resultOK = branchIfNotEmpty(regT0); |
120 | abortWithReason(JITGetByValResultIsNotEmpty); |
121 | resultOK.link(this); |
122 | } |
123 | |
124 | emitValueProfilingSite(metadata); |
125 | emitPutVirtualRegister(dst); |
126 | |
127 | Label nextHotPath = label(); |
128 | |
129 | m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath)); |
130 | } |
131 | |
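// Emits a get_by_val fast path specialized for a single cached property name (string or symbol).
// The property cell in regT1 is checked against the cached identifier; on a match the base in
// regT0 goes through a JITGetByIdGenerator inline cache, otherwise we append to the caller's
// slow cases.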
132 | JITGetByIdGenerator JIT::emitGetByValWithCachedId(ByValInfo* byValInfo, OpGetByVal bytecode, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases) |
133 | { |
134 | // base: regT0 |
135 | // property: regT1 |
136 | // scratch: regT3 |
137 | |
138 | int dst = bytecode.m_dst.offset(); |
139 | |
140 | slowCases.append(branchIfNotCell(regT1)); |
141 | emitByValIdentifierCheck(byValInfo, regT1, regT3, propertyName, slowCases); |
142 | |
143 | JITGetByIdGenerator gen( |
144 | m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), |
145 | propertyName.impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::Get); |
146 | gen.generateFastPath(*this); |
147 | |
148 | fastDoneCase = jump(); |
149 | |
150 | Label coldPathBegin = label(); |
151 | gen.slowPathJump().link(this); |
152 | |
153 | Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdOptimize, dst, gen.stubInfo(), regT0, propertyName.impl()); |
154 | gen.reportSlowPathCall(coldPathBegin, call); |
155 | slowDoneCase = jump(); |
156 | |
157 | return gen; |
158 | } |
159 | |
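// Slow path for get_by_val. The slow cases must be linked in the order they were added on the
// fast path: base cell check, optional property int32 check, indexing-shape (badType) check,
// vector length check, then empty value. A string base gets one extra chance through the
// stringGetByVal thunk before we fall back to operationGetByValOptimize.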
160 | void JIT::emitSlow_op_get_by_val(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
161 | { |
162 | auto bytecode = currentInstruction->as<OpGetByVal>(); |
163 | int dst = bytecode.m_dst.offset(); |
164 | int base = bytecode.m_base.offset(); |
165 | int property = bytecode.m_property.offset(); |
166 | ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo; |
167 | |
168 | linkSlowCaseIfNotJSCell(iter, base); // base cell check |
169 | |
170 | if (!isOperandConstantInt(property)) |
171 | linkSlowCase(iter); // property int32 check |
172 | Jump nonCell = jump(); |
173 | linkSlowCase(iter); // base array check |
174 | Jump notString = branchIfNotString(regT0); |
175 | emitNakedCall(CodeLocationLabel<NoPtrTag>(m_vm->getCTIStub(stringGetByValGenerator).retaggedCode<NoPtrTag>())); |
176 | Jump failed = branchTest64(Zero, regT0); |
177 | emitPutVirtualRegister(dst, regT0); |
178 | emitJumpSlowToHot(jump(), currentInstruction->size()); |
179 | failed.link(this); |
180 | notString.link(this); |
181 | nonCell.link(this); |
182 | |
183 | linkSlowCase(iter); // vector length check |
184 | linkSlowCase(iter); // empty value |
185 | |
186 | Label slowPath = label(); |
187 | |
188 | emitGetVirtualRegister(base, regT0); |
189 | emitGetVirtualRegister(property, regT1); |
190 | Call call = callOperation(operationGetByValOptimize, dst, regT0, regT1, byValInfo); |
191 | |
192 | m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath; |
193 | m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call; |
194 | m_byValInstructionIndex++; |
195 | |
196 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
197 | } |
198 | |
199 | void JIT::emit_op_put_by_val_direct(const Instruction* currentInstruction) |
200 | { |
201 | emit_op_put_by_val<OpPutByValDirect>(currentInstruction); |
202 | } |
203 | |
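// Shared fast path for put_by_val and put_by_val_direct. It mirrors the get_by_val fast path:
// base in regT0, index in regT1, with the array profile choosing one indexing shape to
// speculate on. Stores to copy-on-write butterflies always take the slow path.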
204 | template<typename Op> |
205 | void JIT::emit_op_put_by_val(const Instruction* currentInstruction) |
206 | { |
207 | auto bytecode = currentInstruction->as<Op>(); |
208 | auto& metadata = bytecode.metadata(m_codeBlock); |
209 | int base = bytecode.m_base.offset(); |
210 | int property = bytecode.m_property.offset(); |
211 | ArrayProfile* profile = &metadata.m_arrayProfile; |
212 | ByValInfo* byValInfo = m_codeBlock->addByValInfo(); |
213 | |
214 | emitGetVirtualRegister(base, regT0); |
215 | bool propertyNameIsIntegerConstant = isOperandConstantInt(property); |
216 | if (propertyNameIsIntegerConstant) |
217 | move(Imm32(getOperandConstantInt(property)), regT1); |
218 | else |
219 | emitGetVirtualRegister(property, regT1); |
220 | |
221 | emitJumpSlowCaseIfNotJSCell(regT0, base); |
222 | PatchableJump notIndex; |
223 | if (!propertyNameIsIntegerConstant) { |
224 | notIndex = emitPatchableJumpIfNotInt(regT1); |
225 | addSlowCase(notIndex); |
226 | // See comment in op_get_by_val. |
227 | zeroExtend32ToPtr(regT1, regT1); |
228 | } |
229 | emitArrayProfilingSiteWithCell(regT0, regT2, profile); |
230 | |
231 | PatchableJump badType; |
232 | JumpList slowCases; |
233 | |
234 | // FIXME: Maybe we should do this inline? |
235 | addSlowCase(branchTest32(NonZero, regT2, TrustedImm32(CopyOnWrite))); |
236 | and32(TrustedImm32(IndexingShapeMask), regT2); |
237 | |
238 | JITArrayMode mode = chooseArrayMode(profile); |
239 | switch (mode) { |
240 | case JITInt32: |
241 | slowCases = emitInt32PutByVal(bytecode, badType); |
242 | break; |
243 | case JITDouble: |
244 | slowCases = emitDoublePutByVal(bytecode, badType); |
245 | break; |
246 | case JITContiguous: |
247 | slowCases = emitContiguousPutByVal(bytecode, badType); |
248 | break; |
249 | case JITArrayStorage: |
250 | slowCases = emitArrayStoragePutByVal(bytecode, badType); |
251 | break; |
252 | default: |
253 | CRASH(); |
254 | break; |
255 | } |
256 | |
257 | addSlowCase(badType); |
258 | addSlowCase(slowCases); |
259 | |
260 | Label done = label(); |
261 | |
262 | m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, done)); |
263 | } |
264 | |
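// Store into an Int32/Double/Contiguous butterfly. An in-bounds store is handled directly;
// a store just past the public length (but still within the vector length) bumps the public
// length and is flagged in the array profile as a store to a hole; anything beyond the vector
// length becomes a slow case.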
265 | template<typename Op> |
266 | JIT::JumpList JIT::emitGenericContiguousPutByVal(Op bytecode, PatchableJump& badType, IndexingType indexingShape) |
267 | { |
268 | auto& metadata = bytecode.metadata(m_codeBlock); |
269 | int value = bytecode.m_value.offset(); |
270 | ArrayProfile* profile = &metadata.m_arrayProfile; |
271 | |
272 | JumpList slowCases; |
273 | |
274 | badType = patchableBranch32(NotEqual, regT2, TrustedImm32(indexingShape)); |
275 | |
276 | loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2); |
277 | Jump outOfBounds = branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())); |
278 | |
279 | Label storeResult = label(); |
280 | emitGetVirtualRegister(value, regT3); |
281 | switch (indexingShape) { |
282 | case Int32Shape: |
283 | slowCases.append(branchIfNotInt32(regT3)); |
284 | store64(regT3, BaseIndex(regT2, regT1, TimesEight)); |
285 | break; |
286 | case DoubleShape: { |
287 | Jump notInt = branchIfNotInt32(regT3); |
288 | convertInt32ToDouble(regT3, fpRegT0); |
289 | Jump ready = jump(); |
290 | notInt.link(this); |
291 | add64(tagTypeNumberRegister, regT3); |
292 | move64ToDouble(regT3, fpRegT0); |
293 | slowCases.append(branchIfNaN(fpRegT0)); |
294 | ready.link(this); |
295 | storeDouble(fpRegT0, BaseIndex(regT2, regT1, TimesEight)); |
296 | break; |
297 | } |
298 | case ContiguousShape: |
299 | store64(regT3, BaseIndex(regT2, regT1, TimesEight)); |
300 | emitWriteBarrier(bytecode.m_base.offset(), value, ShouldFilterValue); |
301 | break; |
302 | default: |
303 | CRASH(); |
304 | break; |
305 | } |
306 | |
307 | Jump done = jump(); |
308 | outOfBounds.link(this); |
309 | |
310 | slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfVectorLength()))); |
311 | |
312 | emitArrayProfileStoreToHoleSpecialCase(profile); |
313 | |
314 | add32(TrustedImm32(1), regT1, regT3); |
315 | store32(regT3, Address(regT2, Butterfly::offsetOfPublicLength())); |
316 | jump().linkTo(storeResult, this); |
317 | |
318 | done.link(this); |
319 | |
320 | return slowCases; |
321 | } |
322 | |
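// Store into ArrayStorage. Storing over an existing value is the fast case; storing into a hole
// bumps numValuesInVector (and the length, if the index is at or past it) before looping back to
// the store; indices beyond the vector length go to the slow path.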
323 | template<typename Op> |
324 | JIT::JumpList JIT::emitArrayStoragePutByVal(Op bytecode, PatchableJump& badType) |
325 | { |
326 | auto& metadata = bytecode.metadata(m_codeBlock); |
327 | int value = bytecode.m_value.offset(); |
328 | ArrayProfile* profile = &metadata.m_arrayProfile; |
329 | |
330 | JumpList slowCases; |
331 | |
332 | badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ArrayStorageShape)); |
333 | loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2); |
334 | slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset()))); |
335 | |
336 | Jump empty = branchTest64(Zero, BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset())); |
337 | |
338 | Label storeResult(this); |
339 | emitGetVirtualRegister(value, regT3); |
340 | store64(regT3, BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset())); |
341 | emitWriteBarrier(bytecode.m_base.offset(), value, ShouldFilterValue); |
342 | Jump end = jump(); |
343 | |
344 | empty.link(this); |
345 | emitArrayProfileStoreToHoleSpecialCase(profile); |
346 | add32(TrustedImm32(1), Address(regT2, ArrayStorage::numValuesInVectorOffset())); |
347 | branch32(Below, regT1, Address(regT2, ArrayStorage::lengthOffset())).linkTo(storeResult, this); |
348 | |
349 | add32(TrustedImm32(1), regT1); |
350 | store32(regT1, Address(regT2, ArrayStorage::lengthOffset())); |
351 | sub32(TrustedImm32(1), regT1); |
352 | jump().linkTo(storeResult, this); |
353 | |
354 | end.link(this); |
355 | |
356 | return slowCases; |
357 | } |
358 | |
359 | template<typename Op> |
360 | JITPutByIdGenerator JIT::emitPutByValWithCachedId(ByValInfo* byValInfo, Op bytecode, PutKind putKind, const Identifier& propertyName, JumpList& doneCases, JumpList& slowCases) |
361 | { |
362 | // base: regT0 |
363 | // property: regT1 |
364 | // scratch: regT2 |
365 | |
366 | int base = bytecode.m_base.offset(); |
367 | int value = bytecode.m_value.offset(); |
368 | |
369 | slowCases.append(branchIfNotCell(regT1)); |
370 | emitByValIdentifierCheck(byValInfo, regT1, regT1, propertyName, slowCases); |
371 | |
372 | // Write barrier breaks the registers. So after issuing the write barrier, |
373 | // reload the registers. |
374 | emitGetVirtualRegisters(base, regT0, value, regT1); |
375 | |
376 | JITPutByIdGenerator gen( |
377 | m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), |
378 | JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(), putKind); |
379 | gen.generateFastPath(*this); |
380 | emitWriteBarrier(base, value, ShouldFilterBase); |
381 | doneCases.append(jump()); |
382 | |
383 | Label coldPathBegin = label(); |
384 | gen.slowPathJump().link(this); |
385 | |
386 | Call call = callOperation(gen.slowPathFunction(), gen.stubInfo(), regT1, regT0, propertyName.impl()); |
387 | gen.reportSlowPathCall(coldPathBegin, call); |
388 | doneCases.append(jump()); |
389 | |
390 | return gen; |
391 | } |
392 | |
393 | void JIT::emitSlow_op_put_by_val(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
394 | { |
395 | bool isDirect = currentInstruction->opcodeID() == op_put_by_val_direct; |
396 | int base; |
397 | int property; |
398 | int value; |
399 | |
400 | auto load = [&](auto bytecode) { |
401 | base = bytecode.m_base.offset(); |
402 | property = bytecode.m_property.offset(); |
403 | value = bytecode.m_value.offset(); |
404 | }; |
405 | |
406 | if (isDirect) |
407 | load(currentInstruction->as<OpPutByValDirect>()); |
408 | else |
409 | load(currentInstruction->as<OpPutByVal>()); |
410 | |
411 | ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo; |
412 | |
413 | linkAllSlowCases(iter); |
414 | Label slowPath = label(); |
415 | |
416 | emitGetVirtualRegister(base, regT0); |
417 | emitGetVirtualRegister(property, regT1); |
418 | emitGetVirtualRegister(value, regT2); |
419 | Call call = callOperation(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize, regT0, regT1, regT2, byValInfo); |
420 | |
421 | m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath; |
422 | m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call; |
423 | m_byValInstructionIndex++; |
424 | } |
425 | |
426 | void JIT::emit_op_put_getter_by_id(const Instruction* currentInstruction) |
427 | { |
428 | auto bytecode = currentInstruction->as<OpPutGetterById>(); |
429 | emitGetVirtualRegister(bytecode.m_base.offset(), regT0); |
430 | int32_t options = bytecode.m_attributes; |
431 | emitGetVirtualRegister(bytecode.m_accessor.offset(), regT1); |
432 | callOperation(operationPutGetterById, regT0, m_codeBlock->identifier(bytecode.m_property).impl(), options, regT1); |
433 | } |
434 | |
435 | void JIT::emit_op_put_setter_by_id(const Instruction* currentInstruction) |
436 | { |
437 | auto bytecode = currentInstruction->as<OpPutSetterById>(); |
438 | emitGetVirtualRegister(bytecode.m_base.offset(), regT0); |
439 | int32_t options = bytecode.m_attributes; |
440 | emitGetVirtualRegister(bytecode.m_accessor.offset(), regT1); |
441 | callOperation(operationPutSetterById, regT0, m_codeBlock->identifier(bytecode.m_property).impl(), options, regT1); |
442 | } |
443 | |
444 | void JIT::emit_op_put_getter_setter_by_id(const Instruction* currentInstruction) |
445 | { |
446 | auto bytecode = currentInstruction->as<OpPutGetterSetterById>(); |
447 | emitGetVirtualRegister(bytecode.m_base.offset(), regT0); |
448 | int32_t attribute = bytecode.m_attributes; |
449 | emitGetVirtualRegister(bytecode.m_getter.offset(), regT1); |
450 | emitGetVirtualRegister(bytecode.m_setter.offset(), regT2); |
451 | callOperation(operationPutGetterSetter, regT0, m_codeBlock->identifier(bytecode.m_property).impl(), attribute, regT1, regT2); |
452 | } |
453 | |
454 | void JIT::emit_op_put_getter_by_val(const Instruction* currentInstruction) |
455 | { |
456 | auto bytecode = currentInstruction->as<OpPutGetterByVal>(); |
457 | emitGetVirtualRegister(bytecode.m_base.offset(), regT0); |
458 | emitGetVirtualRegister(bytecode.m_property.offset(), regT1); |
459 | int32_t attributes = bytecode.m_attributes; |
460 | emitGetVirtualRegister(bytecode.m_accessor.offset(), regT2); |
461 | callOperation(operationPutGetterByVal, regT0, regT1, attributes, regT2); |
462 | } |
463 | |
464 | void JIT::emit_op_put_setter_by_val(const Instruction* currentInstruction) |
465 | { |
466 | auto bytecode = currentInstruction->as<OpPutSetterByVal>(); |
467 | emitGetVirtualRegister(bytecode.m_base.offset(), regT0); |
468 | emitGetVirtualRegister(bytecode.m_property.offset(), regT1); |
469 | int32_t attributes = bytecode.m_attributes; |
470 | emitGetVirtualRegister(bytecode.m_accessor.offset(), regT2); |
471 | callOperation(operationPutSetterByVal, regT0, regT1, attributes, regT2); |
472 | } |
473 | |
474 | void JIT::emit_op_del_by_id(const Instruction* currentInstruction) |
475 | { |
476 | auto bytecode = currentInstruction->as<OpDelById>(); |
477 | int dst = bytecode.m_dst.offset(); |
478 | int base = bytecode.m_base.offset(); |
479 | int property = bytecode.m_property; |
480 | emitGetVirtualRegister(base, regT0); |
481 | callOperation(operationDeleteByIdJSResult, dst, regT0, m_codeBlock->identifier(property).impl()); |
482 | } |
483 | |
484 | void JIT::emit_op_del_by_val(const Instruction* currentInstruction) |
485 | { |
486 | auto bytecode = currentInstruction->as<OpDelByVal>(); |
487 | int dst = bytecode.m_dst.offset(); |
488 | int base = bytecode.m_base.offset(); |
489 | int property = bytecode.m_property.offset(); |
490 | emitGetVirtualRegister(base, regT0); |
491 | emitGetVirtualRegister(property, regT1); |
492 | callOperation(operationDeleteByValJSResult, dst, regT0, regT1); |
493 | } |
494 | |
495 | void JIT::emit_op_try_get_by_id(const Instruction* currentInstruction) |
496 | { |
497 | auto bytecode = currentInstruction->as<OpTryGetById>(); |
498 | int resultVReg = bytecode.m_dst.offset(); |
499 | int baseVReg = bytecode.m_base.offset(); |
500 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
501 | |
502 | emitGetVirtualRegister(baseVReg, regT0); |
503 | |
504 | emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); |
505 | |
506 | JITGetByIdGenerator gen( |
507 | m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), |
508 | ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::TryGet); |
509 | gen.generateFastPath(*this); |
510 | addSlowCase(gen.slowPathJump()); |
511 | m_getByIds.append(gen); |
512 | |
513 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
514 | emitPutVirtualRegister(resultVReg); |
515 | } |
516 | |
517 | void JIT::emitSlow_op_try_get_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
518 | { |
519 | linkAllSlowCases(iter); |
520 | |
521 | auto bytecode = currentInstruction->as<OpTryGetById>(); |
522 | int resultVReg = bytecode.m_dst.offset(); |
523 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
524 | |
525 | JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++]; |
526 | |
527 | Label coldPathBegin = label(); |
528 | |
529 | Call call = callOperation(operationTryGetByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl()); |
530 | |
531 | gen.reportSlowPathCall(coldPathBegin, call); |
532 | } |
533 | |
534 | void JIT::emit_op_get_by_id_direct(const Instruction* currentInstruction) |
535 | { |
536 | auto bytecode = currentInstruction->as<OpGetByIdDirect>(); |
537 | int resultVReg = bytecode.m_dst.offset(); |
538 | int baseVReg = bytecode.m_base.offset(); |
539 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
540 | |
541 | emitGetVirtualRegister(baseVReg, regT0); |
542 | |
543 | emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); |
544 | |
545 | JITGetByIdGenerator gen( |
546 | m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), |
547 | ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::GetDirect); |
548 | gen.generateFastPath(*this); |
549 | addSlowCase(gen.slowPathJump()); |
550 | m_getByIds.append(gen); |
551 | |
552 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
553 | emitPutVirtualRegister(resultVReg); |
554 | } |
555 | |
556 | void JIT::emitSlow_op_get_by_id_direct(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
557 | { |
558 | linkAllSlowCases(iter); |
559 | |
560 | auto bytecode = currentInstruction->as<OpGetByIdDirect>(); |
561 | int resultVReg = bytecode.m_dst.offset(); |
562 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
563 | |
564 | JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++]; |
565 | |
566 | Label coldPathBegin = label(); |
567 | |
568 | Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdDirectOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl()); |
569 | |
570 | gen.reportSlowPathCall(coldPathBegin, call); |
571 | } |
572 | |
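// get_by_id goes through a JITGetByIdGenerator inline cache; the slow path calls
// operationGetByIdOptimize to (re)fill the cache. When the property is 'length' and the
// profiled mode is ArrayLength, the array profile is also fed here.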
573 | void JIT::emit_op_get_by_id(const Instruction* currentInstruction) |
574 | { |
575 | auto bytecode = currentInstruction->as<OpGetById>(); |
576 | auto& metadata = bytecode.metadata(m_codeBlock); |
577 | int resultVReg = bytecode.m_dst.offset(); |
578 | int baseVReg = bytecode.m_base.offset(); |
579 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
580 | |
581 | emitGetVirtualRegister(baseVReg, regT0); |
582 | |
583 | emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); |
584 | |
585 | if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) { |
586 | Jump notArrayLengthMode = branch8(NotEqual, AbsoluteAddress(&metadata.m_modeMetadata.mode), TrustedImm32(static_cast<uint8_t>(GetByIdMode::ArrayLength))); |
587 | emitArrayProfilingSiteWithCell(regT0, regT1, &metadata.m_modeMetadata.arrayLengthMode.arrayProfile); |
588 | notArrayLengthMode.link(this); |
589 | } |
590 | |
591 | JITGetByIdGenerator gen( |
592 | m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), |
593 | ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::Get); |
594 | gen.generateFastPath(*this); |
595 | addSlowCase(gen.slowPathJump()); |
596 | m_getByIds.append(gen); |
597 | |
598 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
599 | emitPutVirtualRegister(resultVReg); |
600 | } |
601 | |
602 | void JIT::emit_op_get_by_id_with_this(const Instruction* currentInstruction) |
603 | { |
604 | auto bytecode = currentInstruction->as<OpGetByIdWithThis>(); |
605 | int resultVReg = bytecode.m_dst.offset(); |
606 | int baseVReg = bytecode.m_base.offset(); |
607 | int thisVReg = bytecode.m_thisValue.offset(); |
608 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
609 | |
610 | emitGetVirtualRegister(baseVReg, regT0); |
611 | emitGetVirtualRegister(thisVReg, regT1); |
612 | emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); |
613 | emitJumpSlowCaseIfNotJSCell(regT1, thisVReg); |
614 | |
615 | JITGetByIdWithThisGenerator gen( |
616 | m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), |
617 | ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), JSValueRegs(regT1), AccessType::GetWithThis); |
618 | gen.generateFastPath(*this); |
619 | addSlowCase(gen.slowPathJump()); |
620 | m_getByIdsWithThis.append(gen); |
621 | |
622 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
623 | emitPutVirtualRegister(resultVReg); |
624 | } |
625 | |
626 | void JIT::emitSlow_op_get_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
627 | { |
628 | linkAllSlowCases(iter); |
629 | |
630 | auto bytecode = currentInstruction->as<OpGetById>(); |
631 | int resultVReg = bytecode.m_dst.offset(); |
632 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
633 | |
634 | JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++]; |
635 | |
636 | Label coldPathBegin = label(); |
637 | |
638 | Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl()); |
639 | |
640 | gen.reportSlowPathCall(coldPathBegin, call); |
641 | } |
642 | |
643 | void JIT::emitSlow_op_get_by_id_with_this(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
644 | { |
645 | linkAllSlowCases(iter); |
646 | |
647 | auto bytecode = currentInstruction->as<OpGetByIdWithThis>(); |
648 | int resultVReg = bytecode.m_dst.offset(); |
649 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
650 | |
651 | JITGetByIdWithThisGenerator& gen = m_getByIdsWithThis[m_getByIdWithThisIndex++]; |
652 | |
653 | Label coldPathBegin = label(); |
654 | |
655 | Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdWithThisOptimize, resultVReg, gen.stubInfo(), regT0, regT1, ident->impl()); |
656 | |
657 | gen.reportSlowPathCall(coldPathBegin, call); |
658 | } |
659 | |
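// put_by_id also uses an inline cache (JITPutByIdGenerator). The write barrier for the base is
// emitted unconditionally after the fast path, and the slow path calls the generator's
// slow-path function, which reflects direct vs. ordinary puts and the ECMA mode.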
660 | void JIT::emit_op_put_by_id(const Instruction* currentInstruction) |
661 | { |
662 | auto bytecode = currentInstruction->as<OpPutById>(); |
663 | int baseVReg = bytecode.m_base.offset(); |
664 | int valueVReg = bytecode.m_value.offset(); |
665 | bool direct = !!(bytecode.m_flags & PutByIdIsDirect); |
666 | |
667 | // In order to be able to patch both the Structure and the object offset, we store one pointer |
668 | // to just after the point where the arguments have been loaded into registers ('hotPathBegin'), |
669 | // and we generate code such that the Structure & offset are always at the same distance from it. |
670 | |
671 | emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1); |
672 | |
673 | emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); |
674 | |
675 | JITPutByIdGenerator gen( |
676 | m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), |
677 | JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(), |
678 | direct ? Direct : NotDirect); |
679 | |
680 | gen.generateFastPath(*this); |
681 | addSlowCase(gen.slowPathJump()); |
682 | |
683 | emitWriteBarrier(baseVReg, valueVReg, ShouldFilterBase); |
684 | |
685 | m_putByIds.append(gen); |
686 | } |
687 | |
688 | void JIT::emitSlow_op_put_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
689 | { |
690 | linkAllSlowCases(iter); |
691 | |
692 | auto bytecode = currentInstruction->as<OpPutById>(); |
693 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
694 | |
695 | Label coldPathBegin(this); |
696 | |
697 | JITPutByIdGenerator& gen = m_putByIds[m_putByIdIndex++]; |
698 | |
699 | Call call = callOperation(gen.slowPathFunction(), gen.stubInfo(), regT1, regT0, ident->impl()); |
700 | |
701 | gen.reportSlowPathCall(coldPathBegin, call); |
702 | } |
703 | |
704 | void JIT::emit_op_in_by_id(const Instruction* currentInstruction) |
705 | { |
706 | auto bytecode = currentInstruction->as<OpInById>(); |
707 | int resultVReg = bytecode.m_dst.offset(); |
708 | int baseVReg = bytecode.m_base.offset(); |
709 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
710 | |
711 | emitGetVirtualRegister(baseVReg, regT0); |
712 | |
713 | emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); |
714 | |
715 | JITInByIdGenerator gen( |
716 | m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(), |
717 | ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0)); |
718 | gen.generateFastPath(*this); |
719 | addSlowCase(gen.slowPathJump()); |
720 | m_inByIds.append(gen); |
721 | |
722 | emitPutVirtualRegister(resultVReg); |
723 | } |
724 | |
725 | void JIT::emitSlow_op_in_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
726 | { |
727 | linkAllSlowCases(iter); |
728 | |
729 | auto bytecode = currentInstruction->as<OpInById>(); |
730 | int resultVReg = bytecode.m_dst.offset(); |
731 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
732 | |
733 | JITInByIdGenerator& gen = m_inByIds[m_inByIdIndex++]; |
734 | |
735 | Label coldPathBegin = label(); |
736 | |
737 | Call call = callOperation(operationInByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl()); |
738 | |
739 | gen.reportSlowPathCall(coldPathBegin, call); |
740 | } |
741 | |
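// If the global object's var injection watchpoint has been invalidated (e.g. by eval introducing
// new variables), statically resolved scopes can no longer be trusted, so we bail to the slow
// path.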
742 | void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks) |
743 | { |
744 | if (!needsVarInjectionChecks) |
745 | return; |
746 | addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated))); |
747 | } |
748 | |
749 | void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth) |
750 | { |
751 | emitVarInjectionCheck(needsVarInjectionChecks); |
752 | emitGetVirtualRegister(scope, regT0); |
753 | for (unsigned i = 0; i < depth; ++i) |
754 | loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0); |
755 | emitPutVirtualRegister(dst); |
756 | } |
757 | |
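// resolve_scope: for the Global* resolve types the resulting scope is a compile-time constant
// (guarded by var-injection and, for GlobalProperty, global lexical binding epoch checks);
// ClosureVar walks 'depth' links up the scope chain; the Unresolved* types re-dispatch at
// runtime on the resolve type stored in the metadata.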
758 | void JIT::emit_op_resolve_scope(const Instruction* currentInstruction) |
759 | { |
760 | auto bytecode = currentInstruction->as<OpResolveScope>(); |
761 | auto& metadata = bytecode.metadata(m_codeBlock); |
762 | int dst = bytecode.m_dst.offset(); |
763 | int scope = bytecode.m_scope.offset(); |
764 | ResolveType resolveType = metadata.m_resolveType; |
765 | unsigned depth = metadata.m_localScopeDepth; |
766 | |
767 | auto emitCode = [&] (ResolveType resolveType) { |
768 | switch (resolveType) { |
769 | case GlobalProperty: |
770 | case GlobalPropertyWithVarInjectionChecks: { |
771 | JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock); |
772 | RELEASE_ASSERT(constantScope); |
773 | emitVarInjectionCheck(needsVarInjectionChecks(resolveType)); |
774 | load32(&metadata.m_globalLexicalBindingEpoch, regT1); |
775 | addSlowCase(branch32(NotEqual, AbsoluteAddress(m_codeBlock->globalObject()->addressOfGlobalLexicalBindingEpoch()), regT1)); |
776 | move(TrustedImmPtr(constantScope), regT0); |
777 | emitPutVirtualRegister(dst); |
778 | break; |
779 | } |
780 | |
781 | case GlobalVar: |
782 | case GlobalVarWithVarInjectionChecks: |
783 | case GlobalLexicalVar: |
784 | case GlobalLexicalVarWithVarInjectionChecks: { |
785 | JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock); |
786 | RELEASE_ASSERT(constantScope); |
787 | emitVarInjectionCheck(needsVarInjectionChecks(resolveType)); |
788 | move(TrustedImmPtr(constantScope), regT0); |
789 | emitPutVirtualRegister(dst); |
790 | break; |
791 | } |
792 | case ClosureVar: |
793 | case ClosureVarWithVarInjectionChecks: |
794 | emitResolveClosure(dst, scope, needsVarInjectionChecks(resolveType), depth); |
795 | break; |
796 | case ModuleVar: |
797 | move(TrustedImmPtr(metadata.m_lexicalEnvironment.get()), regT0); |
798 | emitPutVirtualRegister(dst); |
799 | break; |
800 | case Dynamic: |
801 | addSlowCase(jump()); |
802 | break; |
803 | case LocalClosureVar: |
804 | case UnresolvedProperty: |
805 | case UnresolvedPropertyWithVarInjectionChecks: |
806 | RELEASE_ASSERT_NOT_REACHED(); |
807 | } |
808 | }; |
809 | |
810 | switch (resolveType) { |
811 | case GlobalProperty: |
812 | case GlobalPropertyWithVarInjectionChecks: { |
813 | JumpList skipToEnd; |
814 | load32(&metadata.m_resolveType, regT0); |
815 | |
816 | Jump notGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(resolveType)); |
817 | emitCode(resolveType); |
818 | skipToEnd.append(jump()); |
819 | |
820 | notGlobalProperty.link(this); |
821 | emitCode(needsVarInjectionChecks(resolveType) ? GlobalLexicalVarWithVarInjectionChecks : GlobalLexicalVar); |
822 | |
823 | skipToEnd.link(this); |
824 | break; |
825 | } |
826 | case UnresolvedProperty: |
827 | case UnresolvedPropertyWithVarInjectionChecks: { |
828 | JumpList skipToEnd; |
829 | load32(&metadata.m_resolveType, regT0); |
830 | |
831 | Jump notGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(GlobalProperty)); |
832 | emitCode(GlobalProperty); |
833 | skipToEnd.append(jump()); |
834 | notGlobalProperty.link(this); |
835 | |
836 | Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks)); |
837 | emitCode(GlobalPropertyWithVarInjectionChecks); |
838 | skipToEnd.append(jump()); |
839 | notGlobalPropertyWithVarInjections.link(this); |
840 | |
841 | Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar)); |
842 | emitCode(GlobalLexicalVar); |
843 | skipToEnd.append(jump()); |
844 | notGlobalLexicalVar.link(this); |
845 | |
846 | Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks)); |
847 | emitCode(GlobalLexicalVarWithVarInjectionChecks); |
848 | skipToEnd.append(jump()); |
849 | notGlobalLexicalVarWithVarInjections.link(this); |
850 | |
851 | addSlowCase(jump()); |
852 | skipToEnd.link(this); |
853 | break; |
854 | } |
855 | |
856 | default: |
857 | emitCode(resolveType); |
858 | break; |
859 | } |
860 | } |
861 | |
862 | void JIT::emitLoadWithStructureCheck(int scope, Structure** structureSlot) |
863 | { |
864 | loadPtr(structureSlot, regT1); |
865 | emitGetVirtualRegister(scope, regT0); |
866 | addSlowCase(branchTestPtr(Zero, regT1)); |
867 | load32(Address(regT1, Structure::structureIDOffset()), regT1); |
868 | addSlowCase(branch32(NotEqual, Address(regT0, JSCell::structureIDOffset()), regT1)); |
869 | } |
870 | |
871 | void JIT::emitGetVarFromPointer(JSValue* operand, GPRReg reg) |
872 | { |
873 | loadPtr(operand, reg); |
874 | } |
875 | |
876 | void JIT::emitGetVarFromIndirectPointer(JSValue** operand, GPRReg reg) |
877 | { |
878 | loadPtr(operand, reg); |
879 | loadPtr(reg, reg); |
880 | } |
881 | |
882 | void JIT::emitGetClosureVar(int scope, uintptr_t operand) |
883 | { |
884 | emitGetVirtualRegister(scope, regT0); |
885 | loadPtr(Address(regT0, JSLexicalEnvironment::offsetOfVariables() + operand * sizeof(Register)), regT0); |
886 | } |
887 | |
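// get_from_scope: GlobalProperty loads from the global object's out-of-line butterfly behind a
// structure check; Global(Lexical)Var loads through a (possibly indirect) pointer to the
// variable, with a TDZ check for lexical bindings; ClosureVar loads from the lexical environment
// at a constant offset. The Unresolved* types branch on the resolve type at runtime.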
888 | void JIT::emit_op_get_from_scope(const Instruction* currentInstruction) |
889 | { |
890 | auto bytecode = currentInstruction->as<OpGetFromScope>(); |
891 | auto& metadata = bytecode.metadata(m_codeBlock); |
892 | int dst = bytecode.m_dst.offset(); |
893 | int scope = bytecode.m_scope.offset(); |
894 | ResolveType resolveType = metadata.m_getPutInfo.resolveType(); |
895 | Structure** structureSlot = metadata.m_structure.slot(); |
896 | uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&metadata.m_operand); |
897 | |
898 | auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) { |
899 | switch (resolveType) { |
900 | case GlobalProperty: |
901 | case GlobalPropertyWithVarInjectionChecks: { |
902 | emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection since we don't cache structures for anything but the GlobalObject. Additionally, resolve_scope handles checking for the var injection. |
903 | GPRReg base = regT0; |
904 | GPRReg result = regT0; |
905 | GPRReg offset = regT1; |
906 | GPRReg scratch = regT2; |
907 | |
908 | jitAssert(scopedLambda<Jump(void)>([&] () -> Jump { |
909 | return branchPtr(Equal, base, TrustedImmPtr(m_codeBlock->globalObject())); |
910 | })); |
911 | |
912 | load32(operandSlot, offset); |
913 | if (!ASSERT_DISABLED) { |
914 | Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset)); |
915 | abortWithReason(JITOffsetIsNotOutOfLine); |
916 | isOutOfLine.link(this); |
917 | } |
918 | loadPtr(Address(base, JSObject::butterflyOffset()), scratch); |
919 | neg32(offset); |
920 | signExtend32ToPtr(offset, offset); |
921 | load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result); |
922 | break; |
923 | } |
924 | case GlobalVar: |
925 | case GlobalVarWithVarInjectionChecks: |
926 | case GlobalLexicalVar: |
927 | case GlobalLexicalVarWithVarInjectionChecks: |
928 | emitVarInjectionCheck(needsVarInjectionChecks(resolveType)); |
929 | if (indirectLoadForOperand) |
930 | emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT0); |
931 | else |
932 | emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT0); |
933 | if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) // TDZ check. |
934 | addSlowCase(branchIfEmpty(regT0)); |
935 | break; |
936 | case ClosureVar: |
937 | case ClosureVarWithVarInjectionChecks: |
938 | emitVarInjectionCheck(needsVarInjectionChecks(resolveType)); |
939 | emitGetClosureVar(scope, *operandSlot); |
940 | break; |
941 | case Dynamic: |
942 | addSlowCase(jump()); |
943 | break; |
944 | case LocalClosureVar: |
945 | case ModuleVar: |
946 | case UnresolvedProperty: |
947 | case UnresolvedPropertyWithVarInjectionChecks: |
948 | RELEASE_ASSERT_NOT_REACHED(); |
949 | } |
950 | }; |
951 | |
952 | switch (resolveType) { |
953 | case GlobalProperty: |
954 | case GlobalPropertyWithVarInjectionChecks: { |
955 | JumpList skipToEnd; |
956 | load32(&metadata.m_getPutInfo, regT0); |
957 | and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0 |
958 | |
959 | Jump isNotGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(resolveType)); |
960 | emitCode(resolveType, false); |
961 | skipToEnd.append(jump()); |
962 | |
963 | isNotGlobalProperty.link(this); |
964 | emitCode(needsVarInjectionChecks(resolveType) ? GlobalLexicalVarWithVarInjectionChecks : GlobalLexicalVar, true); |
965 | |
966 | skipToEnd.link(this); |
967 | break; |
968 | } |
969 | case UnresolvedProperty: |
970 | case UnresolvedPropertyWithVarInjectionChecks: { |
971 | JumpList skipToEnd; |
972 | load32(&metadata.m_getPutInfo, regT0); |
973 | and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0 |
974 | |
975 | Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty)); |
976 | Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks)); |
977 | isGlobalProperty.link(this); |
978 | emitCode(GlobalProperty, false); |
979 | skipToEnd.append(jump()); |
980 | notGlobalPropertyWithVarInjections.link(this); |
981 | |
982 | Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar)); |
983 | emitCode(GlobalLexicalVar, true); |
984 | skipToEnd.append(jump()); |
985 | notGlobalLexicalVar.link(this); |
986 | |
987 | Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks)); |
988 | emitCode(GlobalLexicalVarWithVarInjectionChecks, true); |
989 | skipToEnd.append(jump()); |
990 | notGlobalLexicalVarWithVarInjections.link(this); |
991 | |
992 | addSlowCase(jump()); |
993 | |
994 | skipToEnd.link(this); |
995 | break; |
996 | } |
997 | |
998 | default: |
999 | emitCode(resolveType, false); |
1000 | break; |
1001 | } |
1002 | emitPutVirtualRegister(dst); |
1003 | emitValueProfilingSite(metadata); |
1004 | } |
1005 | |
1006 | void JIT::emitSlow_op_get_from_scope(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
1007 | { |
1008 | linkAllSlowCases(iter); |
1009 | |
1010 | auto bytecode = currentInstruction->as<OpGetFromScope>(); |
1011 | int dst = bytecode.m_dst.offset(); |
1012 | callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetFromScope, dst, currentInstruction); |
1013 | } |
1014 | |
1015 | void JIT::emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet* set) |
1016 | { |
1017 | emitGetVirtualRegister(value, regT0); |
1018 | emitNotifyWrite(set); |
1019 | storePtr(regT0, operand); |
1020 | } |
1021 | void JIT::emitPutGlobalVariableIndirect(JSValue** addressOfOperand, int value, WatchpointSet** indirectWatchpointSet) |
1022 | { |
1023 | emitGetVirtualRegister(value, regT0); |
1024 | loadPtr(indirectWatchpointSet, regT1); |
1025 | emitNotifyWrite(regT1); |
1026 | loadPtr(addressOfOperand, regT1); |
1027 | storePtr(regT0, regT1); |
1028 | } |
1029 | |
1030 | void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet* set) |
1031 | { |
1032 | emitGetVirtualRegister(value, regT1); |
1033 | emitGetVirtualRegister(scope, regT0); |
1034 | emitNotifyWrite(set); |
1035 | storePtr(regT1, Address(regT0, JSLexicalEnvironment::offsetOfVariables() + operand * sizeof(Register))); |
1036 | } |
1037 | |
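// put_to_scope mirrors get_from_scope: GlobalProperty stores into the global object's
// out-of-line butterfly behind a structure check; Global(Lexical)Var stores through the variable
// pointer after notifying its watchpoint set, with a TDZ check for non-initializing stores to
// lexical bindings; ClosureVar stores into the lexical environment. Each path emits the
// appropriate write barrier.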
1038 | void JIT::emit_op_put_to_scope(const Instruction* currentInstruction) |
1039 | { |
1040 | auto bytecode = currentInstruction->as<OpPutToScope>(); |
1041 | auto& metadata = bytecode.metadata(m_codeBlock); |
1042 | int scope = bytecode.m_scope.offset(); |
1043 | int value = bytecode.m_value.offset(); |
1044 | GetPutInfo getPutInfo = copiedGetPutInfo(bytecode); |
1045 | ResolveType resolveType = getPutInfo.resolveType(); |
1046 | Structure** structureSlot = metadata.m_structure.slot(); |
1047 | uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&metadata.m_operand); |
1048 | |
1049 | auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) { |
1050 | switch (resolveType) { |
1051 | case GlobalProperty: |
1052 | case GlobalPropertyWithVarInjectionChecks: { |
1053 | emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection since we don't cache structures for anything but the GlobalObject. Additionally, resolve_scope handles checking for the var injection. |
1054 | emitGetVirtualRegister(value, regT2); |
1055 | |
1056 | jitAssert(scopedLambda<Jump(void)>([&] () -> Jump { |
1057 | return branchPtr(Equal, regT0, TrustedImmPtr(m_codeBlock->globalObject())); |
1058 | })); |
1059 | |
1060 | loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0); |
1061 | loadPtr(operandSlot, regT1); |
1062 | negPtr(regT1); |
1063 | storePtr(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue))); |
1064 | emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue); |
1065 | break; |
1066 | } |
1067 | case GlobalVar: |
1068 | case GlobalVarWithVarInjectionChecks: |
1069 | case GlobalLexicalVar: |
1070 | case GlobalLexicalVarWithVarInjectionChecks: { |
1071 | JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock); |
1072 | RELEASE_ASSERT(constantScope); |
1073 | emitVarInjectionCheck(needsVarInjectionChecks(resolveType)); |
1074 | if (!isInitialization(getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) { |
1075 | // We need to do a TDZ check here because we can't always prove we need to emit TDZ checks statically. |
1076 | if (indirectLoadForOperand) |
1077 | emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT0); |
1078 | else |
1079 | emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT0); |
1080 | addSlowCase(branchIfEmpty(regT0)); |
1081 | } |
1082 | if (indirectLoadForOperand) |
1083 | emitPutGlobalVariableIndirect(bitwise_cast<JSValue**>(operandSlot), value, &metadata.m_watchpointSet); |
1084 | else |
1085 | emitPutGlobalVariable(bitwise_cast<JSValue*>(*operandSlot), value, metadata.m_watchpointSet); |
1086 | emitWriteBarrier(constantScope, value, ShouldFilterValue); |
1087 | break; |
1088 | } |
1089 | case LocalClosureVar: |
1090 | case ClosureVar: |
1091 | case ClosureVarWithVarInjectionChecks: |
1092 | emitVarInjectionCheck(needsVarInjectionChecks(resolveType)); |
1093 | emitPutClosureVar(scope, *operandSlot, value, metadata.m_watchpointSet); |
1094 | emitWriteBarrier(scope, value, ShouldFilterValue); |
1095 | break; |
1096 | case ModuleVar: |
1097 | case Dynamic: |
1098 | addSlowCase(jump()); |
1099 | break; |
1100 | case UnresolvedProperty: |
1101 | case UnresolvedPropertyWithVarInjectionChecks: |
1102 | RELEASE_ASSERT_NOT_REACHED(); |
1103 | break; |
1104 | } |
1105 | }; |
1106 | |
1107 | switch (resolveType) { |
1108 | case GlobalProperty: |
1109 | case GlobalPropertyWithVarInjectionChecks: { |
1110 | JumpList skipToEnd; |
1111 | load32(&metadata.m_getPutInfo, regT0); |
1112 | and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0 |
1113 | |
1114 | Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(resolveType)); |
1115 | Jump isGlobalLexicalVar = branch32(Equal, regT0, TrustedImm32(needsVarInjectionChecks(resolveType) ? GlobalLexicalVarWithVarInjectionChecks : GlobalLexicalVar)); |
1116 | addSlowCase(jump()); // Dynamic - this can happen if we attempt to put a value to an already-initialized const binding. |
1117 | |
1118 | isGlobalLexicalVar.link(this); |
1119 | emitCode(needsVarInjectionChecks(resolveType) ? GlobalLexicalVarWithVarInjectionChecks : GlobalLexicalVar, true); |
1120 | skipToEnd.append(jump()); |
1121 | |
1122 | isGlobalProperty.link(this); |
1123 | emitCode(resolveType, false); |
1124 | skipToEnd.link(this); |
1125 | break; |
1126 | } |
1127 | case UnresolvedProperty: |
1128 | case UnresolvedPropertyWithVarInjectionChecks: { |
1129 | JumpList skipToEnd; |
1130 | load32(&metadata.m_getPutInfo, regT0); |
1131 | and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0 |
1132 | |
1133 | Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty)); |
1134 | Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks)); |
1135 | isGlobalProperty.link(this); |
1136 | emitCode(GlobalProperty, false); |
1137 | skipToEnd.append(jump()); |
1138 | notGlobalPropertyWithVarInjections.link(this); |
1139 | |
1140 | Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar)); |
1141 | emitCode(GlobalLexicalVar, true); |
1142 | skipToEnd.append(jump()); |
1143 | notGlobalLexicalVar.link(this); |
1144 | |
1145 | Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks)); |
1146 | emitCode(GlobalLexicalVarWithVarInjectionChecks, true); |
1147 | skipToEnd.append(jump()); |
1148 | notGlobalLexicalVarWithVarInjections.link(this); |
1149 | |
1150 | addSlowCase(jump()); |
1151 | |
1152 | skipToEnd.link(this); |
1153 | break; |
1154 | } |
1155 | |
1156 | default: |
1157 | emitCode(resolveType, false); |
1158 | break; |
1159 | } |
1160 | } |
1161 | |
1162 | void JIT::emitSlow_op_put_to_scope(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
1163 | { |
1164 | linkAllSlowCases(iter); |
1165 | |
1166 | auto bytecode = currentInstruction->as<OpPutToScope>(); |
1167 | ResolveType resolveType = copiedGetPutInfo(bytecode).resolveType(); |
1168 | if (resolveType == ModuleVar) { |
1169 | JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_strict_mode_readonly_property_write_error); |
1170 | slowPathCall.call(); |
1171 | } else |
1172 | callOperation(operationPutToScope, currentInstruction); |
1173 | } |
1174 | |
1175 | void JIT::emit_op_get_from_arguments(const Instruction* currentInstruction) |
1176 | { |
1177 | auto bytecode = currentInstruction->as<OpGetFromArguments>(); |
1178 | int dst = bytecode.m_dst.offset(); |
1179 | int arguments = bytecode.m_arguments.offset(); |
1180 | int index = bytecode.m_index; |
1181 | |
1182 | emitGetVirtualRegister(arguments, regT0); |
1183 | load64(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)), regT0); |
1184 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
1185 | emitPutVirtualRegister(dst); |
1186 | } |
1187 | |
1188 | void JIT::emit_op_put_to_arguments(const Instruction* currentInstruction) |
1189 | { |
1190 | auto bytecode = currentInstruction->as<OpPutToArguments>(); |
1191 | int arguments = bytecode.m_arguments.offset(); |
1192 | int index = bytecode.m_index; |
1193 | int value = bytecode.m_value.offset(); |
1194 | |
1195 | emitGetVirtualRegister(arguments, regT0); |
1196 | emitGetVirtualRegister(value, regT1); |
1197 | store64(regT1, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>))); |
1198 | |
1199 | emitWriteBarrier(arguments, value, ShouldFilterValue); |
1200 | } |
1201 | |
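// Generational write barrier. Depending on the filter mode we skip the barrier when the value
// and/or the owner is not a cell; otherwise, unless the owner is already remembered or in Eden,
// we call operationWriteBarrierSlowPath.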
1202 | void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode) |
1203 | { |
1204 | Jump valueNotCell; |
1205 | if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) { |
1206 | emitGetVirtualRegister(value, regT0); |
1207 | valueNotCell = branchIfNotCell(regT0); |
1208 | } |
1209 | |
1210 | emitGetVirtualRegister(owner, regT0); |
1211 | Jump ownerNotCell; |
1212 | if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase) |
1213 | ownerNotCell = branchIfNotCell(regT0); |
1214 | |
1215 | Jump ownerIsRememberedOrInEden = barrierBranch(*vm(), regT0, regT1); |
1216 | callOperation(operationWriteBarrierSlowPath, regT0); |
1217 | ownerIsRememberedOrInEden.link(this); |
1218 | |
1219 | if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase) |
1220 | ownerNotCell.link(this); |
1221 | if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) |
1222 | valueNotCell.link(this); |
1223 | } |
1224 | |
1225 | void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode) |
1226 | { |
1227 | emitGetVirtualRegister(value, regT0); |
1228 | Jump valueNotCell; |
1229 | if (mode == ShouldFilterValue) |
1230 | valueNotCell = branchIfNotCell(regT0); |
1231 | |
1232 | emitWriteBarrier(owner); |
1233 | |
1234 | if (mode == ShouldFilterValue) |
1235 | valueNotCell.link(this); |
1236 | } |
1237 | |
1238 | #else // USE(JSVALUE64) |
1239 | |
1240 | void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode) |
1241 | { |
1242 | Jump valueNotCell; |
1243 | if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) { |
1244 | emitLoadTag(value, regT0); |
1245 | valueNotCell = branchIfNotCell(regT0); |
1246 | } |
1247 | |
1248 | emitLoad(owner, regT0, regT1); |
1249 | Jump ownerNotCell; |
1250 | if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue) |
1251 | ownerNotCell = branchIfNotCell(regT0); |
1252 | |
1253 | Jump ownerIsRememberedOrInEden = barrierBranch(*vm(), regT1, regT2); |
1254 | callOperation(operationWriteBarrierSlowPath, regT1); |
1255 | ownerIsRememberedOrInEden.link(this); |
1256 | |
1257 | if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue) |
1258 | ownerNotCell.link(this); |
1259 | if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) |
1260 | valueNotCell.link(this); |
1261 | } |
1262 | |
1263 | void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode) |
1264 | { |
1265 | Jump valueNotCell; |
1266 | if (mode == ShouldFilterValue) { |
1267 | emitLoadTag(value, regT0); |
1268 | valueNotCell = branchIfNotCell(regT0); |
1269 | } |
1270 | |
1271 | emitWriteBarrier(owner); |
1272 | |
1273 | if (mode == ShouldFilterValue) |
1274 | valueNotCell.link(this); |
1275 | } |
1276 | |
1277 | #endif // USE(JSVALUE64) |
1278 | |
1279 | void JIT::emitWriteBarrier(JSCell* owner) |
1280 | { |
1281 | Jump ownerIsRememberedOrInEden = barrierBranch(*vm(), owner, regT0); |
1282 | callOperation(operationWriteBarrierSlowPath, owner); |
1283 | ownerIsRememberedOrInEden.link(this); |
1284 | } |
1285 | |
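// Checks that the property cell matches the identifier this by_val stub was specialized for:
// symbols are compared by pointer against the cached symbol; strings are checked by comparing
// the resolved StringImpl against the cached property name.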
1286 | void JIT::emitByValIdentifierCheck(ByValInfo* byValInfo, RegisterID cell, RegisterID scratch, const Identifier& propertyName, JumpList& slowCases) |
1287 | { |
1288 | if (propertyName.isSymbol()) |
1289 | slowCases.append(branchPtr(NotEqual, cell, TrustedImmPtr(byValInfo->cachedSymbol.get()))); |
1290 | else { |
1291 | slowCases.append(branchIfNotString(cell)); |
1292 | loadPtr(Address(cell, JSString::offsetOfValue()), scratch); |
1293 | slowCases.append(branchPtr(NotEqual, scratch, TrustedImmPtr(propertyName.impl()))); |
1294 | } |
1295 | } |
1296 | |
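// Compiles a shape-specialized get_by_val stub once the generic fast path has failed, then
// repatches the original badType jump to enter the stub and the slow-path call to go to the
// generic operation.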
1297 | void JIT::privateCompileGetByVal(const ConcurrentJSLocker&, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) |
1298 | { |
1299 | const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); |
1300 | |
1301 | PatchableJump badType; |
1302 | JumpList slowCases; |
1303 | |
1304 | switch (arrayMode) { |
1305 | case JITInt32: |
1306 | slowCases = emitInt32GetByVal(currentInstruction, badType); |
1307 | break; |
1308 | case JITDouble: |
1309 | slowCases = emitDoubleGetByVal(currentInstruction, badType); |
1310 | break; |
1311 | case JITContiguous: |
1312 | slowCases = emitContiguousGetByVal(currentInstruction, badType); |
1313 | break; |
1314 | case JITArrayStorage: |
1315 | slowCases = emitArrayStorageGetByVal(currentInstruction, badType); |
1316 | break; |
1317 | case JITDirectArguments: |
1318 | slowCases = emitDirectArgumentsGetByVal(currentInstruction, badType); |
1319 | break; |
1320 | case JITScopedArguments: |
1321 | slowCases = emitScopedArgumentsGetByVal(currentInstruction, badType); |
1322 | break; |
1323 | default: |
1324 | TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode); |
1325 | if (isInt(type)) |
1326 | slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, type); |
1327 | else |
1328 | slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, type); |
1329 | break; |
1330 | } |
1331 | |
1332 | Jump done = jump(); |
1333 | |
1334 | LinkBuffer patchBuffer(*this, m_codeBlock); |
1335 | |
1336 | patchBuffer.link(badType, byValInfo->slowPathTarget); |
1337 | patchBuffer.link(slowCases, byValInfo->slowPathTarget); |
1338 | |
1339 | patchBuffer.link(done, byValInfo->badTypeDoneTarget); |
1340 | |
1341 | byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB( |
1342 | m_codeBlock, patchBuffer, JITStubRoutinePtrTag, |
1343 | "Baseline get_by_val stub for %s, return point %p" , toCString(*m_codeBlock).data(), returnAddress.value()); |
1344 | |
1345 | MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel<JITStubRoutinePtrTag>(byValInfo->stubRoutine->code().code())); |
1346 | MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(operationGetByValGeneric)); |
1347 | } |
1348 | |
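// Compiled when get_by_val keeps seeing the same identifier-like property name: the generated
// stub checks the name (see emitByValIdentifierCheck) and then performs a get_by_id-style access
// through JITGetByIdGenerator, so it can reuse the structure-based inline cache machinery. The
// original not-index jump is repatched to enter this stub.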
1349 | void JIT::privateCompileGetByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName) |
1350 | { |
1351 | const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); |
1352 | auto bytecode = currentInstruction->as<OpGetByVal>(); |
1353 | |
1354 | Jump fastDoneCase; |
1355 | Jump slowDoneCase; |
1356 | JumpList slowCases; |
1357 | |
1358 | JITGetByIdGenerator gen = emitGetByValWithCachedId(byValInfo, bytecode, propertyName, fastDoneCase, slowDoneCase, slowCases); |
1359 | |
1360 | ConcurrentJSLocker locker(m_codeBlock->m_lock); |
1361 | LinkBuffer patchBuffer(*this, m_codeBlock); |
1362 | patchBuffer.link(slowCases, byValInfo->slowPathTarget); |
1363 | patchBuffer.link(fastDoneCase, byValInfo->badTypeDoneTarget); |
1364 | patchBuffer.link(slowDoneCase, byValInfo->badTypeNextHotPathTarget); |
1365 | if (!m_exceptionChecks.empty()) |
1366 | patchBuffer.link(m_exceptionChecks, byValInfo->exceptionHandler); |
1367 | |
1368 | for (const auto& callSite : m_calls) { |
1369 | if (callSite.callee) |
1370 | patchBuffer.link(callSite.from, callSite.callee); |
1371 | } |
1372 | gen.finalize(patchBuffer, patchBuffer); |
1373 | |
1374 | byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB( |
1375 | m_codeBlock, patchBuffer, JITStubRoutinePtrTag, |
1376 | "Baseline get_by_val with cached property name '%s' stub for %s, return point %p" , propertyName.impl()->utf8().data(), toCString(*m_codeBlock).data(), returnAddress.value()); |
1377 | byValInfo->stubInfo = gen.stubInfo(); |
1378 | |
1379 | MacroAssembler::repatchJump(byValInfo->notIndexJump, CodeLocationLabel<JITStubRoutinePtrTag>(byValInfo->stubRoutine->code().code())); |
1380 | MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(operationGetByValGeneric)); |
1381 | } |
1382 | |
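// put_by_val counterpart of privateCompileGetByVal. Contiguous and ArrayStorage stores emit a
// write barrier whose out-of-line call has to be linked in this LinkBuffer as well, which is
// what needsLinkForWriteBarrier tracks.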
1383 | template<typename Op> |
1384 | void JIT::privateCompilePutByVal(const ConcurrentJSLocker&, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) |
1385 | { |
1386 | const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); |
1387 | auto bytecode = currentInstruction->as<Op>(); |
1388 | |
1389 | PatchableJump badType; |
1390 | JumpList slowCases; |
1391 | |
1392 | bool needsLinkForWriteBarrier = false; |
1393 | |
1394 | switch (arrayMode) { |
1395 | case JITInt32: |
1396 | slowCases = emitInt32PutByVal(bytecode, badType); |
1397 | break; |
1398 | case JITDouble: |
1399 | slowCases = emitDoublePutByVal(bytecode, badType); |
1400 | break; |
1401 | case JITContiguous: |
1402 | slowCases = emitContiguousPutByVal(bytecode, badType); |
1403 | needsLinkForWriteBarrier = true; |
1404 | break; |
1405 | case JITArrayStorage: |
1406 | slowCases = emitArrayStoragePutByVal(bytecode, badType); |
1407 | needsLinkForWriteBarrier = true; |
1408 | break; |
1409 | default: |
1410 | TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode); |
1411 | if (isInt(type)) |
1412 | slowCases = emitIntTypedArrayPutByVal(bytecode, badType, type); |
1413 | else |
1414 | slowCases = emitFloatTypedArrayPutByVal(bytecode, badType, type); |
1415 | break; |
1416 | } |
1417 | |
1418 | Jump done = jump(); |
1419 | |
1420 | LinkBuffer patchBuffer(*this, m_codeBlock); |
1421 | patchBuffer.link(badType, byValInfo->slowPathTarget); |
1422 | patchBuffer.link(slowCases, byValInfo->slowPathTarget); |
1423 | patchBuffer.link(done, byValInfo->badTypeDoneTarget); |
1424 | if (needsLinkForWriteBarrier) { |
1425 | ASSERT(removeCodePtrTag(m_calls.last().callee.executableAddress()) == removeCodePtrTag(operationWriteBarrierSlowPath)); |
1426 | patchBuffer.link(m_calls.last().from, m_calls.last().callee); |
1427 | } |
1428 | |
1429 | bool isDirect = currentInstruction->opcodeID() == op_put_by_val_direct; |
1430 | if (!isDirect) { |
1431 | byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB( |
1432 | m_codeBlock, patchBuffer, JITStubRoutinePtrTag, |
1433 | "Baseline put_by_val stub for %s, return point %p" , toCString(*m_codeBlock).data(), returnAddress.value()); |
1434 | |
1435 | } else { |
1436 | byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB( |
1437 | m_codeBlock, patchBuffer, JITStubRoutinePtrTag, |
1438 | "Baseline put_by_val_direct stub for %s, return point %p" , toCString(*m_codeBlock).data(), returnAddress.value()); |
1439 | } |
1440 | MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel<JITStubRoutinePtrTag>(byValInfo->stubRoutine->code().code())); |
1441 | MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(isDirect ? operationDirectPutByValGeneric : operationPutByValGeneric)); |
1442 | } |
1443 | // This function is only consumed from another translation unit (JITOperations.cpp), |
1444 | // so we list off the two expected specializations in advance. |
1445 | template void JIT::privateCompilePutByVal<OpPutByVal>(const ConcurrentJSLocker&, ByValInfo*, ReturnAddressPtr, JITArrayMode); |
1446 | template void JIT::privateCompilePutByVal<OpPutByValDirect>(const ConcurrentJSLocker&, ByValInfo*, ReturnAddressPtr, JITArrayMode); |
1447 | |
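// Same idea as privateCompileGetByValWithCachedId, but for put_by_val / put_by_val_direct: check
// the cached property name, then reuse the put_by_id inline cache via JITPutByIdGenerator.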
1448 | template<typename Op> |
1449 | void JIT::privateCompilePutByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName) |
1450 | { |
1451 | ASSERT((putKind == Direct && Op::opcodeID == op_put_by_val_direct) || (putKind == NotDirect && Op::opcodeID == op_put_by_val)); |
1452 | const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); |
1453 | auto bytecode = currentInstruction->as<Op>(); |
1454 | |
1455 | JumpList doneCases; |
1456 | JumpList slowCases; |
1457 | |
1458 | JITPutByIdGenerator gen = emitPutByValWithCachedId(byValInfo, bytecode, putKind, propertyName, doneCases, slowCases); |
1459 | |
1460 | ConcurrentJSLocker locker(m_codeBlock->m_lock); |
1461 | LinkBuffer patchBuffer(*this, m_codeBlock); |
1462 | patchBuffer.link(slowCases, byValInfo->slowPathTarget); |
1463 | patchBuffer.link(doneCases, byValInfo->badTypeDoneTarget); |
1464 | if (!m_exceptionChecks.empty()) |
1465 | patchBuffer.link(m_exceptionChecks, byValInfo->exceptionHandler); |
1466 | |
1467 | for (const auto& callSite : m_calls) { |
1468 | if (callSite.callee) |
1469 | patchBuffer.link(callSite.from, callSite.callee); |
1470 | } |
1471 | gen.finalize(patchBuffer, patchBuffer); |
1472 | |
1473 | byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB( |
1474 | m_codeBlock, patchBuffer, JITStubRoutinePtrTag, |
1475 | "Baseline put_by_val%s with cached property name '%s' stub for %s, return point %p" , (putKind == Direct) ? "_direct" : "" , propertyName.impl()->utf8().data(), toCString(*m_codeBlock).data(), returnAddress.value()); |
1476 | byValInfo->stubInfo = gen.stubInfo(); |
1477 | |
1478 | MacroAssembler::repatchJump(byValInfo->notIndexJump, CodeLocationLabel<JITStubRoutinePtrTag>(byValInfo->stubRoutine->code().code())); |
1479 | MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(putKind == Direct ? operationDirectPutByValGeneric : operationPutByValGeneric)); |
1480 | } |
1481 | // This function is only consumed from another translation unit (JITOperations.cpp), |
1482 | // so we list off the two expected specializations in advance. |
1483 | template void JIT::privateCompilePutByValWithCachedId<OpPutByVal>(ByValInfo*, ReturnAddressPtr, PutKind, const Identifier&); |
1484 | template void JIT::privateCompilePutByValWithCachedId<OpPutByValDirect>(ByValInfo*, ReturnAddressPtr, PutKind, const Identifier&); |
1485 | |
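// Fast path for DoubleShape storage: check the indexing shape, bounds-check against the
// butterfly's public length, and load the element. A hole in a double array is represented as
// NaN, so any NaN we load is sent to the slow path instead of being returned directly.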
1486 | JIT::JumpList JIT::emitDoubleLoad(const Instruction*, PatchableJump& badType) |
1487 | { |
1488 | #if USE(JSVALUE64) |
1489 | RegisterID base = regT0; |
1490 | RegisterID property = regT1; |
1491 | RegisterID indexing = regT2; |
1492 | RegisterID scratch = regT3; |
1493 | #else |
1494 | RegisterID base = regT0; |
1495 | RegisterID property = regT2; |
1496 | RegisterID indexing = regT1; |
1497 | RegisterID scratch = regT3; |
1498 | #endif |
1499 | |
1500 | JumpList slowCases; |
1501 | |
1502 | badType = patchableBranch32(NotEqual, indexing, TrustedImm32(DoubleShape)); |
1503 | loadPtr(Address(base, JSObject::butterflyOffset()), scratch); |
1504 | slowCases.append(branch32(AboveOrEqual, property, Address(scratch, Butterfly::offsetOfPublicLength()))); |
1505 | loadDouble(BaseIndex(scratch, property, TimesEight), fpRegT0); |
1506 | slowCases.append(branchIfNaN(fpRegT0)); |
1507 | |
1508 | return slowCases; |
1509 | } |
1510 | |
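// Shared fast path for Int32Shape / ContiguousShape loads: check the expected shape, bounds-check
// against the public length, and load the JSValue. An empty value marks a hole and goes to the
// slow path.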
1511 | JIT::JumpList JIT::emitContiguousLoad(const Instruction*, PatchableJump& badType, IndexingType expectedShape) |
1512 | { |
1513 | #if USE(JSVALUE64) |
1514 | RegisterID base = regT0; |
1515 | RegisterID property = regT1; |
1516 | RegisterID indexing = regT2; |
1517 | JSValueRegs result = JSValueRegs(regT0); |
1518 | RegisterID scratch = regT3; |
1519 | #else |
1520 | RegisterID base = regT0; |
1521 | RegisterID property = regT2; |
1522 | RegisterID indexing = regT1; |
1523 | JSValueRegs result = JSValueRegs(regT1, regT0); |
1524 | RegisterID scratch = regT3; |
1525 | #endif |
1526 | |
1527 | JumpList slowCases; |
1528 | |
1529 | badType = patchableBranch32(NotEqual, indexing, TrustedImm32(expectedShape)); |
1530 | loadPtr(Address(base, JSObject::butterflyOffset()), scratch); |
1531 | slowCases.append(branch32(AboveOrEqual, property, Address(scratch, Butterfly::offsetOfPublicLength()))); |
1532 | loadValue(BaseIndex(scratch, property, TimesEight), result); |
1533 | slowCases.append(branchIfEmpty(result)); |
1534 | |
1535 | return slowCases; |
1536 | } |
1537 | |
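// The add32/branch32(Above) pair below is an unsigned range check that accepts both
// ArrayStorageShape and SlowPutArrayStorageShape in a single comparison. The index is then
// bounds-checked against the vector length, and empty slots (holes) go to the slow path.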
1538 | JIT::JumpList JIT::emitArrayStorageLoad(const Instruction*, PatchableJump& badType) |
1539 | { |
1540 | #if USE(JSVALUE64) |
1541 | RegisterID base = regT0; |
1542 | RegisterID property = regT1; |
1543 | RegisterID indexing = regT2; |
1544 | JSValueRegs result = JSValueRegs(regT0); |
1545 | RegisterID scratch = regT3; |
1546 | #else |
1547 | RegisterID base = regT0; |
1548 | RegisterID property = regT2; |
1549 | RegisterID indexing = regT1; |
1550 | JSValueRegs result = JSValueRegs(regT1, regT0); |
1551 | RegisterID scratch = regT3; |
1552 | #endif |
1553 | |
1554 | JumpList slowCases; |
1555 | |
1556 | add32(TrustedImm32(-ArrayStorageShape), indexing, scratch); |
1557 | badType = patchableBranch32(Above, scratch, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)); |
1558 | |
1559 | loadPtr(Address(base, JSObject::butterflyOffset()), scratch); |
1560 | slowCases.append(branch32(AboveOrEqual, property, Address(scratch, ArrayStorage::vectorLengthOffset()))); |
1561 | |
1562 | loadValue(BaseIndex(scratch, property, TimesEight, ArrayStorage::vectorOffset()), result); |
1563 | slowCases.append(branchIfEmpty(result)); |
1564 | |
1565 | return slowCases; |
1566 | } |
1567 | |
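// DirectArguments fast path: verify the cell type, bounds-check against the argument count, and
// bail out whenever the optional mappedArguments table exists at all, since individual slots may
// then have been overridden and can no longer be read straight out of the inline storage.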
1568 | JIT::JumpList JIT::emitDirectArgumentsGetByVal(const Instruction*, PatchableJump& badType) |
1569 | { |
1570 | JumpList slowCases; |
1571 | |
1572 | #if USE(JSVALUE64) |
1573 | RegisterID base = regT0; |
1574 | RegisterID property = regT1; |
1575 | JSValueRegs result = JSValueRegs(regT0); |
1576 | RegisterID scratch = regT3; |
1577 | RegisterID scratch2 = regT4; |
1578 | #else |
1579 | RegisterID base = regT0; |
1580 | RegisterID property = regT2; |
1581 | JSValueRegs result = JSValueRegs(regT1, regT0); |
1582 | RegisterID scratch = regT3; |
1583 | RegisterID scratch2 = regT4; |
1584 | #endif |
1585 | |
1586 | load8(Address(base, JSCell::typeInfoTypeOffset()), scratch); |
1587 | badType = patchableBranch32(NotEqual, scratch, TrustedImm32(DirectArgumentsType)); |
1588 | |
1589 | load32(Address(base, DirectArguments::offsetOfLength()), scratch2); |
1590 | slowCases.append(branch32(AboveOrEqual, property, scratch2)); |
1591 | slowCases.append(branchTestPtr(NonZero, Address(base, DirectArguments::offsetOfMappedArguments()))); |
1592 | |
1593 | loadValue(BaseIndex(base, property, TimesEight, DirectArguments::storageOffset()), result); |
1594 | |
1595 | return slowCases; |
1596 | } |
1597 | |
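// ScopedArguments fast path. Indices below the table length may be aliased to variables in the
// enclosing JSLexicalEnvironment: the ScopedArgumentsTable maps the index to a ScopeOffset, and
// an invalid offset means we have to take the slow path. Indices at or above the table length
// read from the overflow storage instead. The final emitPreparePreciseIndexMask32 / andPtr pair
// masks the loaded payload to zero when the index is out of bounds, as a Spectre-style
// index-speculation hardening step.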
1598 | JIT::JumpList JIT::emitScopedArgumentsGetByVal(const Instruction*, PatchableJump& badType) |
1599 | { |
1600 | JumpList slowCases; |
1601 | |
1602 | #if USE(JSVALUE64) |
1603 | RegisterID base = regT0; |
1604 | RegisterID property = regT1; |
1605 | JSValueRegs result = JSValueRegs(regT0); |
1606 | RegisterID scratch = regT3; |
1607 | RegisterID scratch2 = regT4; |
1608 | RegisterID scratch3 = regT5; |
1609 | #else |
1610 | RegisterID base = regT0; |
1611 | RegisterID property = regT2; |
1612 | JSValueRegs result = JSValueRegs(regT1, regT0); |
1613 | RegisterID scratch = regT3; |
1614 | RegisterID scratch2 = regT4; |
1615 | RegisterID scratch3 = regT5; |
1616 | #endif |
1617 | |
1618 | load8(Address(base, JSCell::typeInfoTypeOffset()), scratch); |
1619 | badType = patchableBranch32(NotEqual, scratch, TrustedImm32(ScopedArgumentsType)); |
1620 | loadPtr(Address(base, ScopedArguments::offsetOfStorage()), scratch3); |
1621 | slowCases.append(branch32(AboveOrEqual, property, Address(scratch3, ScopedArguments::offsetOfTotalLengthInStorage()))); |
1622 | |
1623 | loadPtr(Address(base, ScopedArguments::offsetOfTable()), scratch); |
1624 | load32(Address(scratch, ScopedArgumentsTable::offsetOfLength()), scratch2); |
1625 | Jump overflowCase = branch32(AboveOrEqual, property, scratch2); |
1626 | loadPtr(Address(base, ScopedArguments::offsetOfScope()), scratch2); |
1627 | loadPtr(Address(scratch, ScopedArgumentsTable::offsetOfArguments()), scratch); |
1628 | load32(BaseIndex(scratch, property, TimesFour), scratch); |
1629 | slowCases.append(branch32(Equal, scratch, TrustedImm32(ScopeOffset::invalidOffset))); |
1630 | loadValue(BaseIndex(scratch2, scratch, TimesEight, JSLexicalEnvironment::offsetOfVariables()), result); |
1631 | Jump done = jump(); |
1632 | overflowCase.link(this); |
1633 | sub32(property, scratch2); |
1634 | neg32(scratch2); |
1635 | loadValue(BaseIndex(scratch3, scratch2, TimesEight), result); |
1636 | slowCases.append(branchIfEmpty(result)); |
1637 | done.link(this); |
1638 | |
1639 | load32(Address(scratch3, ScopedArguments::offsetOfTotalLengthInStorage()), scratch); |
1640 | emitPreparePreciseIndexMask32(property, scratch, scratch2); |
1641 | andPtr(scratch2, result.payloadGPR()); |
1642 | |
1643 | return slowCases; |
1644 | } |
1645 | |
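// Integer typed array fast path: check the JSType, bounds-check against the view's length, load
// (and conditionally Gigacage-cage) the backing vector pointer, then perform a width- and
// signedness-correct element load.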
1646 | JIT::JumpList JIT::emitIntTypedArrayGetByVal(const Instruction*, PatchableJump& badType, TypedArrayType type) |
1647 | { |
1648 | ASSERT(isInt(type)); |
1649 | |
    // The best way to test the array type is to check the JSType in the cell header against
    // typeForTypedArrayType(type). We need to do so without clobbering the registers that hold
    // the base and property.
1652 | |
1653 | #if USE(JSVALUE64) |
1654 | RegisterID base = regT0; |
1655 | RegisterID property = regT1; |
1656 | JSValueRegs result = JSValueRegs(regT0); |
1657 | RegisterID scratch = regT3; |
1658 | RegisterID scratch2 = regT4; |
1659 | #else |
1660 | RegisterID base = regT0; |
1661 | RegisterID property = regT2; |
1662 | JSValueRegs result = JSValueRegs(regT1, regT0); |
1663 | RegisterID scratch = regT3; |
1664 | RegisterID scratch2 = regT4; |
1665 | #endif |
1666 | RegisterID resultPayload = result.payloadGPR(); |
1667 | |
1668 | JumpList slowCases; |
1669 | |
1670 | load8(Address(base, JSCell::typeInfoTypeOffset()), scratch); |
1671 | badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type))); |
1672 | load32(Address(base, JSArrayBufferView::offsetOfLength()), scratch2); |
1673 | slowCases.append(branch32(AboveOrEqual, property, scratch2)); |
1674 | loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), scratch); |
1675 | cageConditionally(Gigacage::Primitive, scratch, scratch2, scratch2); |
1676 | |
1677 | switch (elementSize(type)) { |
1678 | case 1: |
1679 | if (JSC::isSigned(type)) |
1680 | load8SignedExtendTo32(BaseIndex(scratch, property, TimesOne), resultPayload); |
1681 | else |
1682 | load8(BaseIndex(scratch, property, TimesOne), resultPayload); |
1683 | break; |
1684 | case 2: |
1685 | if (JSC::isSigned(type)) |
1686 | load16SignedExtendTo32(BaseIndex(scratch, property, TimesTwo), resultPayload); |
1687 | else |
1688 | load16(BaseIndex(scratch, property, TimesTwo), resultPayload); |
1689 | break; |
1690 | case 4: |
1691 | load32(BaseIndex(scratch, property, TimesFour), resultPayload); |
1692 | break; |
1693 | default: |
1694 | CRASH(); |
1695 | } |
1696 | |
1697 | Jump done; |
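    // A Uint32 element with the sign bit set does not fit in an int32 JSValue. In effect:
    //     if ((int32_t)bits >= 0)
    //         result = jsNumber((int32_t)bits);
    //     else
    //         result = jsNumber((double)(int32_t)bits + 4294967296.0); // + 2^32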
1698 | if (type == TypeUint32) { |
1699 | Jump canBeInt = branch32(GreaterThanOrEqual, resultPayload, TrustedImm32(0)); |
1700 | |
1701 | convertInt32ToDouble(resultPayload, fpRegT0); |
1702 | addDouble(AbsoluteAddress(&twoToThe32), fpRegT0); |
1703 | boxDouble(fpRegT0, result); |
1704 | done = jump(); |
1705 | canBeInt.link(this); |
1706 | } |
1707 | |
1708 | boxInt32(resultPayload, result); |
1709 | if (done.isSet()) |
1710 | done.link(this); |
1711 | return slowCases; |
1712 | } |
1713 | |
1714 | JIT::JumpList JIT::emitFloatTypedArrayGetByVal(const Instruction*, PatchableJump& badType, TypedArrayType type) |
1715 | { |
1716 | ASSERT(isFloat(type)); |
1717 | |
1718 | #if USE(JSVALUE64) |
1719 | RegisterID base = regT0; |
1720 | RegisterID property = regT1; |
1721 | JSValueRegs result = JSValueRegs(regT0); |
1722 | RegisterID scratch = regT3; |
1723 | RegisterID scratch2 = regT4; |
1724 | #else |
1725 | RegisterID base = regT0; |
1726 | RegisterID property = regT2; |
1727 | JSValueRegs result = JSValueRegs(regT1, regT0); |
1728 | RegisterID scratch = regT3; |
1729 | RegisterID scratch2 = regT4; |
1730 | #endif |
1731 | |
1732 | JumpList slowCases; |
1733 | |
1734 | load8(Address(base, JSCell::typeInfoTypeOffset()), scratch); |
1735 | badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type))); |
1736 | load32(Address(base, JSArrayBufferView::offsetOfLength()), scratch2); |
1737 | slowCases.append(branch32(AboveOrEqual, property, scratch2)); |
1738 | loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), scratch); |
1739 | cageConditionally(Gigacage::Primitive, scratch, scratch2, scratch2); |
1740 | |
1741 | switch (elementSize(type)) { |
1742 | case 4: |
1743 | loadFloat(BaseIndex(scratch, property, TimesFour), fpRegT0); |
1744 | convertFloatToDouble(fpRegT0, fpRegT0); |
1745 | break; |
1746 | case 8: { |
1747 | loadDouble(BaseIndex(scratch, property, TimesEight), fpRegT0); |
1748 | break; |
1749 | } |
1750 | default: |
1751 | CRASH(); |
1752 | } |
1753 | |
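    // Scrub any NaN read out of the array into the canonical quiet NaN so it cannot collide with
    // the NaN-boxed JSValue encoding.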
1754 | purifyNaN(fpRegT0); |
1755 | |
1756 | boxDouble(fpRegT0, result); |
1757 | return slowCases; |
1758 | } |
1759 | |
1760 | template<typename Op> |
1761 | JIT::JumpList JIT::emitIntTypedArrayPutByVal(Op bytecode, PatchableJump& badType, TypedArrayType type) |
1762 | { |
1763 | auto& metadata = bytecode.metadata(m_codeBlock); |
1764 | ArrayProfile* profile = &metadata.m_arrayProfile; |
1765 | ASSERT(isInt(type)); |
1766 | |
1767 | int value = bytecode.m_value.offset(); |
1768 | |
1769 | #if USE(JSVALUE64) |
1770 | RegisterID base = regT0; |
1771 | RegisterID property = regT1; |
1772 | RegisterID earlyScratch = regT3; |
1773 | RegisterID lateScratch = regT2; |
1774 | RegisterID lateScratch2 = regT4; |
1775 | #else |
1776 | RegisterID base = regT0; |
1777 | RegisterID property = regT2; |
1778 | RegisterID earlyScratch = regT3; |
1779 | RegisterID lateScratch = regT1; |
1780 | RegisterID lateScratch2 = regT4; |
1781 | #endif |
1782 | |
1783 | JumpList slowCases; |
1784 | |
1785 | load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch); |
1786 | badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type))); |
1787 | load32(Address(base, JSArrayBufferView::offsetOfLength()), lateScratch2); |
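    // An out-of-bounds store is not a cache failure in the usual sense: record it in the array
    // profile and let the slow path decide what to do with it.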
1788 | Jump inBounds = branch32(Below, property, lateScratch2); |
1789 | emitArrayProfileOutOfBoundsSpecialCase(profile); |
1790 | slowCases.append(jump()); |
1791 | inBounds.link(this); |
1792 | |
1793 | #if USE(JSVALUE64) |
1794 | emitGetVirtualRegister(value, earlyScratch); |
1795 | slowCases.append(branchIfNotInt32(earlyScratch)); |
1796 | #else |
1797 | emitLoad(value, lateScratch, earlyScratch); |
1798 | slowCases.append(branchIfNotInt32(lateScratch)); |
1799 | #endif |
1800 | |
1801 | // We would be loading this into base as in get_by_val, except that the slow |
1802 | // path expects the base to be unclobbered. |
1803 | loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch); |
1804 | cageConditionally(Gigacage::Primitive, lateScratch, lateScratch2, lateScratch2); |
1805 | |
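    // Uint8ClampedArray stores clamp to [0, 255] rather than truncating. The branch pair below
    // relies on the first comparison being unsigned and the second signed, so a negative int32
    // fails both and falls through to the xor32 that zeroes it. In effect:
    //     if (v < 0) v = 0;
    //     else if (v > 0xff) v = 0xff;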
1806 | if (isClamped(type)) { |
1807 | ASSERT(elementSize(type) == 1); |
1808 | ASSERT(!JSC::isSigned(type)); |
1809 | Jump inBounds = branch32(BelowOrEqual, earlyScratch, TrustedImm32(0xff)); |
1810 | Jump tooBig = branch32(GreaterThan, earlyScratch, TrustedImm32(0xff)); |
1811 | xor32(earlyScratch, earlyScratch); |
1812 | Jump clamped = jump(); |
1813 | tooBig.link(this); |
1814 | move(TrustedImm32(0xff), earlyScratch); |
1815 | clamped.link(this); |
1816 | inBounds.link(this); |
1817 | } |
1818 | |
1819 | switch (elementSize(type)) { |
1820 | case 1: |
1821 | store8(earlyScratch, BaseIndex(lateScratch, property, TimesOne)); |
1822 | break; |
1823 | case 2: |
1824 | store16(earlyScratch, BaseIndex(lateScratch, property, TimesTwo)); |
1825 | break; |
1826 | case 4: |
1827 | store32(earlyScratch, BaseIndex(lateScratch, property, TimesFour)); |
1828 | break; |
1829 | default: |
1830 | CRASH(); |
1831 | } |
1832 | |
1833 | return slowCases; |
1834 | } |
1835 | |
1836 | template<typename Op> |
1837 | JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Op bytecode, PatchableJump& badType, TypedArrayType type) |
1838 | { |
1839 | auto& metadata = bytecode.metadata(m_codeBlock); |
1840 | ArrayProfile* profile = &metadata.m_arrayProfile; |
1841 | ASSERT(isFloat(type)); |
1842 | |
1843 | int value = bytecode.m_value.offset(); |
1844 | |
1845 | #if USE(JSVALUE64) |
1846 | RegisterID base = regT0; |
1847 | RegisterID property = regT1; |
1848 | RegisterID earlyScratch = regT3; |
1849 | RegisterID lateScratch = regT2; |
1850 | RegisterID lateScratch2 = regT4; |
1851 | #else |
1852 | RegisterID base = regT0; |
1853 | RegisterID property = regT2; |
1854 | RegisterID earlyScratch = regT3; |
1855 | RegisterID lateScratch = regT1; |
1856 | RegisterID lateScratch2 = regT4; |
1857 | #endif |
1858 | |
1859 | JumpList slowCases; |
1860 | |
1861 | load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch); |
1862 | badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type))); |
1863 | load32(Address(base, JSArrayBufferView::offsetOfLength()), lateScratch2); |
1864 | Jump inBounds = branch32(Below, property, lateScratch2); |
1865 | emitArrayProfileOutOfBoundsSpecialCase(profile); |
1866 | slowCases.append(jump()); |
1867 | inBounds.link(this); |
1868 | |
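    // The incoming value may be an int32 or a double: ints are converted with
    // convertInt32ToDouble, boxed doubles are unboxed (on JSVALUE64 by adding TagTypeNumber back,
    // undoing the double encode offset), and anything else goes to the slow path.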
1869 | #if USE(JSVALUE64) |
1870 | emitGetVirtualRegister(value, earlyScratch); |
1871 | Jump doubleCase = branchIfNotInt32(earlyScratch); |
1872 | convertInt32ToDouble(earlyScratch, fpRegT0); |
1873 | Jump ready = jump(); |
1874 | doubleCase.link(this); |
1875 | slowCases.append(branchIfNotNumber(earlyScratch)); |
1876 | add64(tagTypeNumberRegister, earlyScratch); |
1877 | move64ToDouble(earlyScratch, fpRegT0); |
1878 | ready.link(this); |
1879 | #else |
1880 | emitLoad(value, lateScratch, earlyScratch); |
1881 | Jump doubleCase = branchIfNotInt32(lateScratch); |
1882 | convertInt32ToDouble(earlyScratch, fpRegT0); |
1883 | Jump ready = jump(); |
1884 | doubleCase.link(this); |
1885 | slowCases.append(branch32(Above, lateScratch, TrustedImm32(JSValue::LowestTag))); |
1886 | moveIntsToDouble(earlyScratch, lateScratch, fpRegT0, fpRegT1); |
1887 | ready.link(this); |
1888 | #endif |
1889 | |
1890 | // We would be loading this into base as in get_by_val, except that the slow |
1891 | // path expects the base to be unclobbered. |
1892 | loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch); |
1893 | cageConditionally(Gigacage::Primitive, lateScratch, lateScratch2, lateScratch2); |
1894 | |
1895 | switch (elementSize(type)) { |
1896 | case 4: |
1897 | convertDoubleToFloat(fpRegT0, fpRegT0); |
1898 | storeFloat(fpRegT0, BaseIndex(lateScratch, property, TimesFour)); |
1899 | break; |
1900 | case 8: |
1901 | storeDouble(fpRegT0, BaseIndex(lateScratch, property, TimesEight)); |
1902 | break; |
1903 | default: |
1904 | CRASH(); |
1905 | } |
1906 | |
1907 | return slowCases; |
1908 | } |
1909 | |
1910 | template void JIT::emit_op_put_by_val<OpPutByVal>(const Instruction*); |
1911 | |
1912 | } // namespace JSC |
1913 | |
1914 | #endif // ENABLE(JIT) |
1915 | |