1 | /* |
2 | * Copyright (C) 2008-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | |
28 | #if ENABLE(JIT) |
29 | #include "JIT.h" |
30 | |
31 | #include "CodeBlock.h" |
32 | #include "DirectArguments.h" |
33 | #include "GCAwareJITStubRoutine.h" |
34 | #include "GetterSetter.h" |
35 | #include "InterpreterInlines.h" |
36 | #include "JITInlines.h" |
37 | #include "JSArray.h" |
38 | #include "JSFunction.h" |
39 | #include "JSLexicalEnvironment.h" |
40 | #include "JSPromise.h" |
41 | #include "LinkBuffer.h" |
42 | #include "OpcodeInlines.h" |
43 | #include "ResultType.h" |
44 | #include "ScopedArguments.h" |
45 | #include "ScopedArgumentsTable.h" |
46 | #include "SlowPathCall.h" |
47 | #include "StructureStubInfo.h" |
48 | #include "ThunkGenerators.h" |
49 | #include <wtf/ScopedLambda.h> |
50 | #include <wtf/StringPrintStream.h> |
51 | |
52 | |
53 | namespace JSC { |
54 | #if USE(JSVALUE64) |
55 | |
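// Baseline get_by_val: profile the base cell's indexing type, then hand the access to
// JITGetByValGenerator, which emits a patchable inline cache; anything the cache cannot
// handle falls through to the slow path below.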
56 | void JIT::emit_op_get_by_val(const Instruction* currentInstruction) |
57 | { |
58 | auto bytecode = currentInstruction->as<OpGetByVal>(); |
59 | auto& metadata = bytecode.metadata(m_codeBlock); |
60 | int dst = bytecode.m_dst.offset(); |
61 | int base = bytecode.m_base.offset(); |
62 | int property = bytecode.m_property.offset(); |
63 | ArrayProfile* profile = &metadata.m_arrayProfile; |
64 | |
65 | emitGetVirtualRegister(base, regT0); |
66 | emitGetVirtualRegister(property, regT1); |
67 | emitJumpSlowCaseIfNotJSCell(regT0, base); |
68 | emitArrayProfilingSiteWithCell(regT0, regT2, profile); |
69 | |
70 | JITGetByValGenerator gen( |
71 | m_codeBlock, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), RegisterSet::stubUnavailableRegisters(), |
72 | JSValueRegs(regT0), JSValueRegs(regT1), JSValueRegs(regT0)); |
73 | if (isOperandConstantInt(property)) |
74 | gen.stubInfo()->propertyIsInt32 = true; |
75 | gen.generateFastPath(*this); |
76 | addSlowCase(gen.slowPathJump()); |
77 | m_getByVals.append(gen); |
78 | |
79 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
80 | emitPutVirtualRegister(dst); |
81 | } |
82 | |
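// Slow path: call operationGetByValOptimize, which performs the access and may also
// upgrade the inline cache for future executions.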
83 | void JIT::emitSlow_op_get_by_val(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
84 | { |
85 | auto bytecode = currentInstruction->as<OpGetByVal>(); |
86 | int dst = bytecode.m_dst.offset(); |
87 | auto& metadata = bytecode.metadata(m_codeBlock); |
88 | ArrayProfile* profile = &metadata.m_arrayProfile; |
89 | |
90 | JITGetByValGenerator& gen = m_getByVals[m_getByValIndex]; |
91 | ++m_getByValIndex; |
92 | |
93 | linkAllSlowCases(iter); |
94 | |
95 | Label coldPathBegin = label(); |
96 | Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByValOptimize, dst, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), profile, regT0, regT1); |
97 | gen.reportSlowPathCall(coldPathBegin, call); |
98 | } |
99 | |
100 | void JIT::emit_op_put_by_val_direct(const Instruction* currentInstruction) |
101 | { |
102 | emit_op_put_by_val<OpPutByValDirect>(currentInstruction); |
103 | } |
104 | |
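// put_by_val and put_by_val_direct share this emitter. The fast path is specialized for
// the indexing shape predicted by the ArrayProfile; unexpected shapes, copy-on-write
// butterflies, and other misses defer to the patchable slow path recorded in
// ByValCompilationInfo.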
105 | template<typename Op> |
106 | void JIT::emit_op_put_by_val(const Instruction* currentInstruction) |
107 | { |
108 | auto bytecode = currentInstruction->as<Op>(); |
109 | auto& metadata = bytecode.metadata(m_codeBlock); |
110 | int base = bytecode.m_base.offset(); |
111 | int property = bytecode.m_property.offset(); |
112 | ArrayProfile* profile = &metadata.m_arrayProfile; |
113 | ByValInfo* byValInfo = m_codeBlock->addByValInfo(); |
114 | |
115 | emitGetVirtualRegister(base, regT0); |
116 | bool propertyNameIsIntegerConstant = isOperandConstantInt(property); |
117 | if (propertyNameIsIntegerConstant) |
118 | move(Imm32(getOperandConstantInt(property)), regT1); |
119 | else |
120 | emitGetVirtualRegister(property, regT1); |
121 | |
122 | emitJumpSlowCaseIfNotJSCell(regT0, base); |
123 | PatchableJump notIndex; |
124 | if (!propertyNameIsIntegerConstant) { |
125 | notIndex = emitPatchableJumpIfNotInt(regT1); |
126 | addSlowCase(notIndex); |
        // Zero-extend the 32-bit index to pointer width: the index feeds the address
        // calculation below, and the bounds checks treat it as unsigned, so a negative
        // index safely fails them.
        zeroExtend32ToPtr(regT1, regT1);
129 | } |
130 | emitArrayProfilingSiteWithCell(regT0, regT2, profile); |
131 | |
132 | PatchableJump badType; |
133 | JumpList slowCases; |
134 | |
135 | // FIXME: Maybe we should do this inline? |
136 | addSlowCase(branchTest32(NonZero, regT2, TrustedImm32(CopyOnWrite))); |
137 | and32(TrustedImm32(IndexingShapeMask), regT2); |
138 | |
139 | JITArrayMode mode = chooseArrayMode(profile); |
140 | switch (mode) { |
141 | case JITInt32: |
142 | slowCases = emitInt32PutByVal(bytecode, badType); |
143 | break; |
144 | case JITDouble: |
145 | slowCases = emitDoublePutByVal(bytecode, badType); |
146 | break; |
147 | case JITContiguous: |
148 | slowCases = emitContiguousPutByVal(bytecode, badType); |
149 | break; |
150 | case JITArrayStorage: |
151 | slowCases = emitArrayStoragePutByVal(bytecode, badType); |
152 | break; |
153 | default: |
154 | CRASH(); |
155 | break; |
156 | } |
157 | |
158 | addSlowCase(badType); |
159 | addSlowCase(slowCases); |
160 | |
161 | Label done = label(); |
162 | |
163 | m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeIndex, notIndex, badType, mode, profile, done, done)); |
164 | } |
165 | |
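// Shared store path for the Int32, Double, and Contiguous shapes. An in-bounds store
// writes straight into the butterfly; a store just past the public length (but within
// the vector) bumps the length and retries, and anything past the vector length takes
// the slow path.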
166 | template<typename Op> |
167 | JIT::JumpList JIT::emitGenericContiguousPutByVal(Op bytecode, PatchableJump& badType, IndexingType indexingShape) |
168 | { |
169 | auto& metadata = bytecode.metadata(m_codeBlock); |
170 | int value = bytecode.m_value.offset(); |
171 | ArrayProfile* profile = &metadata.m_arrayProfile; |
172 | |
173 | JumpList slowCases; |
174 | |
175 | badType = patchableBranch32(NotEqual, regT2, TrustedImm32(indexingShape)); |
176 | |
177 | loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2); |
178 | Jump outOfBounds = branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())); |
179 | |
180 | Label storeResult = label(); |
181 | emitGetVirtualRegister(value, regT3); |
182 | switch (indexingShape) { |
183 | case Int32Shape: |
184 | slowCases.append(branchIfNotInt32(regT3)); |
185 | store64(regT3, BaseIndex(regT2, regT1, TimesEight)); |
186 | break; |
187 | case DoubleShape: { |
188 | Jump notInt = branchIfNotInt32(regT3); |
189 | convertInt32ToDouble(regT3, fpRegT0); |
190 | Jump ready = jump(); |
191 | notInt.link(this); |
192 | add64(numberTagRegister, regT3); |
193 | move64ToDouble(regT3, fpRegT0); |
194 | slowCases.append(branchIfNaN(fpRegT0)); |
195 | ready.link(this); |
196 | storeDouble(fpRegT0, BaseIndex(regT2, regT1, TimesEight)); |
197 | break; |
198 | } |
199 | case ContiguousShape: |
200 | store64(regT3, BaseIndex(regT2, regT1, TimesEight)); |
201 | emitWriteBarrier(bytecode.m_base.offset(), value, ShouldFilterValue); |
202 | break; |
203 | default: |
204 | CRASH(); |
205 | break; |
206 | } |
207 | |
208 | Jump done = jump(); |
209 | outOfBounds.link(this); |
210 | |
211 | slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfVectorLength()))); |
212 | |
213 | emitArrayProfileStoreToHoleSpecialCase(profile); |
214 | |
215 | add32(TrustedImm32(1), regT1, regT3); |
216 | store32(regT3, Address(regT2, Butterfly::offsetOfPublicLength())); |
217 | jump().linkTo(storeResult, this); |
218 | |
219 | done.link(this); |
220 | |
221 | return slowCases; |
222 | } |
223 | |
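// ArrayStorage variant: overwriting an existing slot is the common case. Storing into a
// hole bumps numValuesInVector and, if the index is at or past the current length, grows
// the length first.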
224 | template<typename Op> |
225 | JIT::JumpList JIT::emitArrayStoragePutByVal(Op bytecode, PatchableJump& badType) |
226 | { |
227 | auto& metadata = bytecode.metadata(m_codeBlock); |
228 | int value = bytecode.m_value.offset(); |
229 | ArrayProfile* profile = &metadata.m_arrayProfile; |
230 | |
231 | JumpList slowCases; |
232 | |
233 | badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ArrayStorageShape)); |
234 | loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2); |
235 | slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset()))); |
236 | |
237 | Jump empty = branchTest64(Zero, BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset())); |
238 | |
239 | Label storeResult(this); |
240 | emitGetVirtualRegister(value, regT3); |
241 | store64(regT3, BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset())); |
242 | emitWriteBarrier(bytecode.m_base.offset(), value, ShouldFilterValue); |
243 | Jump end = jump(); |
244 | |
245 | empty.link(this); |
246 | emitArrayProfileStoreToHoleSpecialCase(profile); |
247 | add32(TrustedImm32(1), Address(regT2, ArrayStorage::numValuesInVectorOffset())); |
248 | branch32(Below, regT1, Address(regT2, ArrayStorage::lengthOffset())).linkTo(storeResult, this); |
249 | |
250 | add32(TrustedImm32(1), regT1); |
251 | store32(regT1, Address(regT2, ArrayStorage::lengthOffset())); |
252 | sub32(TrustedImm32(1), regT1); |
253 | jump().linkTo(storeResult, this); |
254 | |
255 | end.link(this); |
256 | |
257 | return slowCases; |
258 | } |
259 | |
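// When a put_by_val site repeatedly sees the same string or symbol property name, we
// compile a stub that checks for the cached identifier and then reuses the put_by_id
// inline-cache machinery for the store itself.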
260 | template<typename Op> |
261 | JITPutByIdGenerator JIT::emitPutByValWithCachedId(ByValInfo* byValInfo, Op bytecode, PutKind putKind, const Identifier& propertyName, JumpList& doneCases, JumpList& slowCases) |
262 | { |
263 | // base: regT0 |
264 | // property: regT1 |
265 | // scratch: regT2 |
266 | |
267 | int base = bytecode.m_base.offset(); |
268 | int value = bytecode.m_value.offset(); |
269 | |
270 | slowCases.append(branchIfNotCell(regT1)); |
271 | emitByValIdentifierCheck(byValInfo, regT1, regT1, propertyName, slowCases); |
272 | |
    // The write barrier clobbers the registers, so after issuing the write barrier,
    // reload the registers.
275 | emitGetVirtualRegisters(base, regT0, value, regT1); |
276 | |
277 | JITPutByIdGenerator gen( |
278 | m_codeBlock, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), RegisterSet::stubUnavailableRegisters(), |
279 | JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(), putKind); |
280 | gen.generateFastPath(*this); |
281 | emitWriteBarrier(base, value, ShouldFilterBase); |
282 | doneCases.append(jump()); |
283 | |
284 | Label coldPathBegin = label(); |
285 | gen.slowPathJump().link(this); |
286 | |
287 | Call call = callOperation(gen.slowPathFunction(), TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT1, regT0, propertyName.impl()); |
288 | gen.reportSlowPathCall(coldPathBegin, call); |
289 | doneCases.append(jump()); |
290 | |
291 | return gen; |
292 | } |
293 | |
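// Slow path for both put_by_val flavors: reload the operands and call the *Optimize
// operation, which can install a specialized stub through the ByValInfo.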
294 | void JIT::emitSlow_op_put_by_val(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
295 | { |
296 | bool isDirect = currentInstruction->opcodeID() == op_put_by_val_direct; |
297 | int base; |
298 | int property; |
299 | int value; |
300 | |
301 | auto load = [&](auto bytecode) { |
302 | base = bytecode.m_base.offset(); |
303 | property = bytecode.m_property.offset(); |
304 | value = bytecode.m_value.offset(); |
305 | }; |
306 | |
307 | if (isDirect) |
308 | load(currentInstruction->as<OpPutByValDirect>()); |
309 | else |
310 | load(currentInstruction->as<OpPutByVal>()); |
311 | |
312 | ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo; |
313 | |
314 | linkAllSlowCases(iter); |
315 | Label slowPath = label(); |
316 | |
317 | emitGetVirtualRegister(base, regT0); |
318 | emitGetVirtualRegister(property, regT1); |
319 | emitGetVirtualRegister(value, regT2); |
320 | Call call = callOperation(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1, regT2, byValInfo); |
321 | |
322 | m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath; |
323 | m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call; |
324 | m_byValInstructionIndex++; |
325 | } |
326 | |
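// The accessor-definition opcodes below have no fast path; they load their operands and
// call directly into the runtime.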
327 | void JIT::emit_op_put_getter_by_id(const Instruction* currentInstruction) |
328 | { |
329 | auto bytecode = currentInstruction->as<OpPutGetterById>(); |
330 | emitGetVirtualRegister(bytecode.m_base.offset(), regT0); |
331 | int32_t options = bytecode.m_attributes; |
332 | emitGetVirtualRegister(bytecode.m_accessor.offset(), regT1); |
333 | callOperation(operationPutGetterById, TrustedImmPtr(m_codeBlock->globalObject()), regT0, m_codeBlock->identifier(bytecode.m_property).impl(), options, regT1); |
334 | } |
335 | |
336 | void JIT::emit_op_put_setter_by_id(const Instruction* currentInstruction) |
337 | { |
338 | auto bytecode = currentInstruction->as<OpPutSetterById>(); |
339 | emitGetVirtualRegister(bytecode.m_base.offset(), regT0); |
340 | int32_t options = bytecode.m_attributes; |
341 | emitGetVirtualRegister(bytecode.m_accessor.offset(), regT1); |
342 | callOperation(operationPutSetterById, TrustedImmPtr(m_codeBlock->globalObject()), regT0, m_codeBlock->identifier(bytecode.m_property).impl(), options, regT1); |
343 | } |
344 | |
345 | void JIT::emit_op_put_getter_setter_by_id(const Instruction* currentInstruction) |
346 | { |
347 | auto bytecode = currentInstruction->as<OpPutGetterSetterById>(); |
348 | emitGetVirtualRegister(bytecode.m_base.offset(), regT0); |
349 | int32_t attribute = bytecode.m_attributes; |
350 | emitGetVirtualRegister(bytecode.m_getter.offset(), regT1); |
351 | emitGetVirtualRegister(bytecode.m_setter.offset(), regT2); |
352 | callOperation(operationPutGetterSetter, TrustedImmPtr(m_codeBlock->globalObject()), regT0, m_codeBlock->identifier(bytecode.m_property).impl(), attribute, regT1, regT2); |
353 | } |
354 | |
355 | void JIT::emit_op_put_getter_by_val(const Instruction* currentInstruction) |
356 | { |
357 | auto bytecode = currentInstruction->as<OpPutGetterByVal>(); |
358 | emitGetVirtualRegister(bytecode.m_base.offset(), regT0); |
359 | emitGetVirtualRegister(bytecode.m_property.offset(), regT1); |
360 | int32_t attributes = bytecode.m_attributes; |
    emitGetVirtualRegister(bytecode.m_accessor.offset(), regT2);
362 | callOperation(operationPutGetterByVal, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1, attributes, regT2); |
363 | } |
364 | |
365 | void JIT::emit_op_put_setter_by_val(const Instruction* currentInstruction) |
366 | { |
367 | auto bytecode = currentInstruction->as<OpPutSetterByVal>(); |
368 | emitGetVirtualRegister(bytecode.m_base.offset(), regT0); |
369 | emitGetVirtualRegister(bytecode.m_property.offset(), regT1); |
370 | int32_t attributes = bytecode.m_attributes; |
371 | emitGetVirtualRegister(bytecode.m_accessor.offset(), regT2); |
372 | callOperation(operationPutSetterByVal, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1, attributes, regT2); |
373 | } |
374 | |
375 | void JIT::emit_op_del_by_id(const Instruction* currentInstruction) |
376 | { |
377 | auto bytecode = currentInstruction->as<OpDelById>(); |
378 | int dst = bytecode.m_dst.offset(); |
379 | int base = bytecode.m_base.offset(); |
380 | int property = bytecode.m_property; |
381 | emitGetVirtualRegister(base, regT0); |
382 | callOperation(operationDeleteByIdJSResult, dst, TrustedImmPtr(m_codeBlock->globalObject()), regT0, m_codeBlock->identifier(property).impl()); |
383 | } |
384 | |
385 | void JIT::emit_op_del_by_val(const Instruction* currentInstruction) |
386 | { |
387 | auto bytecode = currentInstruction->as<OpDelByVal>(); |
388 | int dst = bytecode.m_dst.offset(); |
389 | int base = bytecode.m_base.offset(); |
390 | int property = bytecode.m_property.offset(); |
391 | emitGetVirtualRegister(base, regT0); |
392 | emitGetVirtualRegister(property, regT1); |
393 | callOperation(operationDeleteByValJSResult, dst, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1); |
394 | } |
395 | |
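// try_get_by_id goes through the same inline-cache generator as get_by_id, but with
// AccessType::TryGetById, which is used for accesses that must avoid observable side
// effects (it hands back getters rather than calling them).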
396 | void JIT::emit_op_try_get_by_id(const Instruction* currentInstruction) |
397 | { |
398 | auto bytecode = currentInstruction->as<OpTryGetById>(); |
399 | int resultVReg = bytecode.m_dst.offset(); |
400 | int baseVReg = bytecode.m_base.offset(); |
401 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
402 | |
403 | emitGetVirtualRegister(baseVReg, regT0); |
404 | |
405 | emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); |
406 | |
407 | JITGetByIdGenerator gen( |
408 | m_codeBlock, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), RegisterSet::stubUnavailableRegisters(), |
409 | ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::TryGetById); |
410 | gen.generateFastPath(*this); |
411 | addSlowCase(gen.slowPathJump()); |
412 | m_getByIds.append(gen); |
413 | |
414 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
415 | emitPutVirtualRegister(resultVReg); |
416 | } |
417 | |
418 | void JIT::emitSlow_op_try_get_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
419 | { |
420 | linkAllSlowCases(iter); |
421 | |
422 | auto bytecode = currentInstruction->as<OpTryGetById>(); |
423 | int resultVReg = bytecode.m_dst.offset(); |
424 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
425 | |
426 | JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++]; |
427 | |
428 | Label coldPathBegin = label(); |
429 | |
430 | Call call = callOperation(operationTryGetByIdOptimize, resultVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT0, ident->impl()); |
431 | |
432 | gen.reportSlowPathCall(coldPathBegin, call); |
433 | } |
434 | |
435 | void JIT::emit_op_get_by_id_direct(const Instruction* currentInstruction) |
436 | { |
437 | auto bytecode = currentInstruction->as<OpGetByIdDirect>(); |
438 | int resultVReg = bytecode.m_dst.offset(); |
439 | int baseVReg = bytecode.m_base.offset(); |
440 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
441 | |
442 | emitGetVirtualRegister(baseVReg, regT0); |
443 | |
444 | emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); |
445 | |
446 | JITGetByIdGenerator gen( |
447 | m_codeBlock, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), RegisterSet::stubUnavailableRegisters(), |
448 | ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::GetByIdDirect); |
449 | gen.generateFastPath(*this); |
450 | addSlowCase(gen.slowPathJump()); |
451 | m_getByIds.append(gen); |
452 | |
453 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
454 | emitPutVirtualRegister(resultVReg); |
455 | } |
456 | |
457 | void JIT::emitSlow_op_get_by_id_direct(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
458 | { |
459 | linkAllSlowCases(iter); |
460 | |
461 | auto bytecode = currentInstruction->as<OpGetByIdDirect>(); |
462 | int resultVReg = bytecode.m_dst.offset(); |
463 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
464 | |
465 | JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++]; |
466 | |
467 | Label coldPathBegin = label(); |
468 | |
469 | Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdDirectOptimize, resultVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT0, ident->impl()); |
470 | |
471 | gen.reportSlowPathCall(coldPathBegin, call); |
472 | } |
473 | |
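// get_by_id: if the property is "length" and this site has been profiled as an array
// length access, record the base's indexing type in the array profile first; the rest is
// the standard patchable inline cache.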
474 | void JIT::emit_op_get_by_id(const Instruction* currentInstruction) |
475 | { |
476 | auto bytecode = currentInstruction->as<OpGetById>(); |
477 | auto& metadata = bytecode.metadata(m_codeBlock); |
478 | int resultVReg = bytecode.m_dst.offset(); |
479 | int baseVReg = bytecode.m_base.offset(); |
480 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
481 | |
482 | emitGetVirtualRegister(baseVReg, regT0); |
483 | |
484 | emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); |
485 | |
486 | if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) { |
487 | Jump notArrayLengthMode = branch8(NotEqual, AbsoluteAddress(&metadata.m_modeMetadata.mode), TrustedImm32(static_cast<uint8_t>(GetByIdMode::ArrayLength))); |
488 | emitArrayProfilingSiteWithCell(regT0, regT1, &metadata.m_modeMetadata.arrayLengthMode.arrayProfile); |
489 | notArrayLengthMode.link(this); |
490 | } |
491 | |
492 | JITGetByIdGenerator gen( |
493 | m_codeBlock, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), RegisterSet::stubUnavailableRegisters(), |
494 | ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::GetById); |
495 | gen.generateFastPath(*this); |
496 | addSlowCase(gen.slowPathJump()); |
497 | m_getByIds.append(gen); |
498 | |
499 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
500 | emitPutVirtualRegister(resultVReg); |
501 | } |
502 | |
503 | void JIT::emit_op_get_by_id_with_this(const Instruction* currentInstruction) |
504 | { |
505 | auto bytecode = currentInstruction->as<OpGetByIdWithThis>(); |
506 | int resultVReg = bytecode.m_dst.offset(); |
507 | int baseVReg = bytecode.m_base.offset(); |
508 | int thisVReg = bytecode.m_thisValue.offset(); |
509 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
510 | |
511 | emitGetVirtualRegister(baseVReg, regT0); |
512 | emitGetVirtualRegister(thisVReg, regT1); |
513 | emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); |
514 | emitJumpSlowCaseIfNotJSCell(regT1, thisVReg); |
515 | |
516 | JITGetByIdWithThisGenerator gen( |
517 | m_codeBlock, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), RegisterSet::stubUnavailableRegisters(), |
518 | ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), JSValueRegs(regT1)); |
519 | gen.generateFastPath(*this); |
520 | addSlowCase(gen.slowPathJump()); |
521 | m_getByIdsWithThis.append(gen); |
522 | |
523 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
524 | emitPutVirtualRegister(resultVReg); |
525 | } |
526 | |
527 | void JIT::emitSlow_op_get_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
528 | { |
529 | linkAllSlowCases(iter); |
530 | |
531 | auto bytecode = currentInstruction->as<OpGetById>(); |
532 | int resultVReg = bytecode.m_dst.offset(); |
533 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
534 | |
535 | JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++]; |
536 | |
537 | Label coldPathBegin = label(); |
538 | |
539 | Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdOptimize, resultVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT0, ident->impl()); |
540 | |
541 | gen.reportSlowPathCall(coldPathBegin, call); |
542 | } |
543 | |
544 | void JIT::emitSlow_op_get_by_id_with_this(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
545 | { |
546 | linkAllSlowCases(iter); |
547 | |
548 | auto bytecode = currentInstruction->as<OpGetByIdWithThis>(); |
549 | int resultVReg = bytecode.m_dst.offset(); |
550 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
551 | |
552 | JITGetByIdWithThisGenerator& gen = m_getByIdsWithThis[m_getByIdWithThisIndex++]; |
553 | |
554 | Label coldPathBegin = label(); |
555 | |
556 | Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdWithThisOptimize, resultVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT0, regT1, ident->impl()); |
557 | |
558 | gen.reportSlowPathCall(coldPathBegin, call); |
559 | } |
560 | |
561 | void JIT::emit_op_put_by_id(const Instruction* currentInstruction) |
562 | { |
563 | auto bytecode = currentInstruction->as<OpPutById>(); |
564 | int baseVReg = bytecode.m_base.offset(); |
565 | int valueVReg = bytecode.m_value.offset(); |
566 | bool direct = !!(bytecode.m_flags & PutByIdIsDirect); |
567 | |
    // In order to be able to patch both the Structure and the object offset, we store one
    // pointer, 'hotPathBegin', to just after the point where the arguments have been loaded
    // into registers, and we generate code such that the Structure & offset are always at the
    // same distance from it.
571 | |
572 | emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1); |
573 | |
574 | emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); |
575 | |
576 | JITPutByIdGenerator gen( |
577 | m_codeBlock, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), RegisterSet::stubUnavailableRegisters(), |
578 | JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(), |
579 | direct ? Direct : NotDirect); |
580 | |
581 | gen.generateFastPath(*this); |
582 | addSlowCase(gen.slowPathJump()); |
583 | |
584 | emitWriteBarrier(baseVReg, valueVReg, ShouldFilterBase); |
585 | |
586 | m_putByIds.append(gen); |
587 | } |
588 | |
589 | void JIT::emitSlow_op_put_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
590 | { |
591 | linkAllSlowCases(iter); |
592 | |
593 | auto bytecode = currentInstruction->as<OpPutById>(); |
594 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
595 | |
596 | Label coldPathBegin(this); |
597 | |
598 | JITPutByIdGenerator& gen = m_putByIds[m_putByIdIndex++]; |
599 | |
600 | Call call = callOperation(gen.slowPathFunction(), TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT1, regT0, ident->impl()); |
601 | |
602 | gen.reportSlowPathCall(coldPathBegin, call); |
603 | } |
604 | |
605 | void JIT::emit_op_in_by_id(const Instruction* currentInstruction) |
606 | { |
607 | auto bytecode = currentInstruction->as<OpInById>(); |
608 | int resultVReg = bytecode.m_dst.offset(); |
609 | int baseVReg = bytecode.m_base.offset(); |
610 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
611 | |
612 | emitGetVirtualRegister(baseVReg, regT0); |
613 | |
614 | emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); |
615 | |
616 | JITInByIdGenerator gen( |
617 | m_codeBlock, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), RegisterSet::stubUnavailableRegisters(), |
618 | ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0)); |
619 | gen.generateFastPath(*this); |
620 | addSlowCase(gen.slowPathJump()); |
621 | m_inByIds.append(gen); |
622 | |
623 | emitPutVirtualRegister(resultVReg); |
624 | } |
625 | |
626 | void JIT::emitSlow_op_in_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
627 | { |
628 | linkAllSlowCases(iter); |
629 | |
630 | auto bytecode = currentInstruction->as<OpInById>(); |
631 | int resultVReg = bytecode.m_dst.offset(); |
632 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
633 | |
634 | JITInByIdGenerator& gen = m_inByIds[m_inByIdIndex++]; |
635 | |
636 | Label coldPathBegin = label(); |
637 | |
638 | Call call = callOperation(operationInByIdOptimize, resultVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT0, ident->impl()); |
639 | |
640 | gen.reportSlowPathCall(coldPathBegin, call); |
641 | } |
642 | |
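// Var injection (for example, eval introducing a new var) invalidates the global
// object's var injection watchpoint; once that has happened, accesses compiled under the
// no-injection assumption bail out to the slow path here.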
643 | void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks) |
644 | { |
645 | if (!needsVarInjectionChecks) |
646 | return; |
647 | addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated))); |
648 | } |
649 | |
650 | void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth) |
651 | { |
652 | emitVarInjectionCheck(needsVarInjectionChecks); |
653 | emitGetVirtualRegister(scope, regT0); |
654 | for (unsigned i = 0; i < depth; ++i) |
655 | loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0); |
656 | emitPutVirtualRegister(dst); |
657 | } |
658 | |
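// resolve_scope: the ResolveType recorded in the metadata picks the strategy.
// GlobalProperty sites also guard on the global lexical binding epoch, since a later
// lexical binding can shadow a global property, and Unresolved sites dispatch on the
// metadata at runtime before giving up to the slow path.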
659 | void JIT::emit_op_resolve_scope(const Instruction* currentInstruction) |
660 | { |
661 | auto bytecode = currentInstruction->as<OpResolveScope>(); |
662 | auto& metadata = bytecode.metadata(m_codeBlock); |
663 | int dst = bytecode.m_dst.offset(); |
664 | int scope = bytecode.m_scope.offset(); |
665 | ResolveType resolveType = metadata.m_resolveType; |
666 | unsigned depth = metadata.m_localScopeDepth; |
667 | |
668 | auto emitCode = [&] (ResolveType resolveType) { |
669 | switch (resolveType) { |
670 | case GlobalProperty: |
671 | case GlobalPropertyWithVarInjectionChecks: { |
672 | JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock); |
673 | RELEASE_ASSERT(constantScope); |
674 | emitVarInjectionCheck(needsVarInjectionChecks(resolveType)); |
675 | load32(&metadata.m_globalLexicalBindingEpoch, regT1); |
676 | addSlowCase(branch32(NotEqual, AbsoluteAddress(m_codeBlock->globalObject()->addressOfGlobalLexicalBindingEpoch()), regT1)); |
677 | move(TrustedImmPtr(constantScope), regT0); |
678 | emitPutVirtualRegister(dst); |
679 | break; |
680 | } |
681 | |
682 | case GlobalVar: |
683 | case GlobalVarWithVarInjectionChecks: |
684 | case GlobalLexicalVar: |
685 | case GlobalLexicalVarWithVarInjectionChecks: { |
686 | JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock); |
687 | RELEASE_ASSERT(constantScope); |
688 | emitVarInjectionCheck(needsVarInjectionChecks(resolveType)); |
689 | move(TrustedImmPtr(constantScope), regT0); |
690 | emitPutVirtualRegister(dst); |
691 | break; |
692 | } |
693 | case ClosureVar: |
694 | case ClosureVarWithVarInjectionChecks: |
695 | emitResolveClosure(dst, scope, needsVarInjectionChecks(resolveType), depth); |
696 | break; |
697 | case ModuleVar: |
698 | move(TrustedImmPtr(metadata.m_lexicalEnvironment.get()), regT0); |
699 | emitPutVirtualRegister(dst); |
700 | break; |
701 | case Dynamic: |
702 | addSlowCase(jump()); |
703 | break; |
704 | case LocalClosureVar: |
705 | case UnresolvedProperty: |
706 | case UnresolvedPropertyWithVarInjectionChecks: |
707 | RELEASE_ASSERT_NOT_REACHED(); |
708 | } |
709 | }; |
710 | |
711 | switch (resolveType) { |
712 | case GlobalProperty: |
713 | case GlobalPropertyWithVarInjectionChecks: { |
714 | JumpList skipToEnd; |
715 | load32(&metadata.m_resolveType, regT0); |
716 | |
717 | Jump notGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(resolveType)); |
718 | emitCode(resolveType); |
719 | skipToEnd.append(jump()); |
720 | |
721 | notGlobalProperty.link(this); |
722 | emitCode(needsVarInjectionChecks(resolveType) ? GlobalLexicalVarWithVarInjectionChecks : GlobalLexicalVar); |
723 | |
724 | skipToEnd.link(this); |
725 | break; |
726 | } |
727 | case UnresolvedProperty: |
728 | case UnresolvedPropertyWithVarInjectionChecks: { |
729 | JumpList skipToEnd; |
730 | load32(&metadata.m_resolveType, regT0); |
731 | |
732 | Jump notGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(GlobalProperty)); |
733 | emitCode(GlobalProperty); |
734 | skipToEnd.append(jump()); |
735 | notGlobalProperty.link(this); |
736 | |
737 | Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks)); |
738 | emitCode(GlobalPropertyWithVarInjectionChecks); |
739 | skipToEnd.append(jump()); |
740 | notGlobalPropertyWithVarInjections.link(this); |
741 | |
742 | Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar)); |
743 | emitCode(GlobalLexicalVar); |
744 | skipToEnd.append(jump()); |
745 | notGlobalLexicalVar.link(this); |
746 | |
747 | Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks)); |
748 | emitCode(GlobalLexicalVarWithVarInjectionChecks); |
749 | skipToEnd.append(jump()); |
750 | notGlobalLexicalVarWithVarInjections.link(this); |
751 | |
752 | addSlowCase(jump()); |
753 | skipToEnd.link(this); |
754 | break; |
755 | } |
756 | |
757 | default: |
758 | emitCode(resolveType); |
759 | break; |
760 | } |
761 | } |
762 | |
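// Guard for GlobalProperty accesses: the cached Structure* starts out null (forcing one
// trip through the slow path to fill it in), and the scope must still have exactly that
// structure.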
763 | void JIT::emitLoadWithStructureCheck(int scope, Structure** structureSlot) |
764 | { |
765 | loadPtr(structureSlot, regT1); |
766 | emitGetVirtualRegister(scope, regT0); |
767 | addSlowCase(branchTestPtr(Zero, regT1)); |
768 | load32(Address(regT1, Structure::structureIDOffset()), regT1); |
769 | addSlowCase(branch32(NotEqual, Address(regT0, JSCell::structureIDOffset()), regT1)); |
770 | } |
771 | |
772 | void JIT::emitGetVarFromPointer(JSValue* operand, GPRReg reg) |
773 | { |
774 | loadPtr(operand, reg); |
775 | } |
776 | |
777 | void JIT::emitGetVarFromIndirectPointer(JSValue** operand, GPRReg reg) |
778 | { |
779 | loadPtr(operand, reg); |
780 | loadPtr(reg, reg); |
781 | } |
782 | |
783 | void JIT::emitGetClosureVar(int scope, uintptr_t operand) |
784 | { |
785 | emitGetVirtualRegister(scope, regT0); |
786 | loadPtr(Address(regT0, JSLexicalEnvironment::offsetOfVariables() + operand * sizeof(Register)), regT0); |
787 | } |
788 | |
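// get_from_scope also specializes on ResolveType: GlobalProperty reads the value out of
// the global object's out-of-line butterfly storage, while global and lexical variables
// load through a pointer, with a TDZ (empty value) check where the binding may be
// uninitialized.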
789 | void JIT::emit_op_get_from_scope(const Instruction* currentInstruction) |
790 | { |
791 | auto bytecode = currentInstruction->as<OpGetFromScope>(); |
792 | auto& metadata = bytecode.metadata(m_codeBlock); |
793 | int dst = bytecode.m_dst.offset(); |
794 | int scope = bytecode.m_scope.offset(); |
795 | ResolveType resolveType = metadata.m_getPutInfo.resolveType(); |
796 | Structure** structureSlot = metadata.m_structure.slot(); |
797 | uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&metadata.m_operand); |
798 | |
799 | auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) { |
800 | switch (resolveType) { |
801 | case GlobalProperty: |
802 | case GlobalPropertyWithVarInjectionChecks: { |
            emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection, since we don't cache structures for anything but the GlobalObject. Additionally, resolve_scope handles checking for var injection.
804 | GPRReg base = regT0; |
805 | GPRReg result = regT0; |
806 | GPRReg offset = regT1; |
807 | GPRReg scratch = regT2; |
808 | |
809 | jitAssert(scopedLambda<Jump(void)>([&] () -> Jump { |
810 | return branchPtr(Equal, base, TrustedImmPtr(m_codeBlock->globalObject())); |
811 | })); |
812 | |
813 | load32(operandSlot, offset); |
814 | if (!ASSERT_DISABLED) { |
815 | Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset)); |
816 | abortWithReason(JITOffsetIsNotOutOfLine); |
817 | isOutOfLine.link(this); |
818 | } |
819 | loadPtr(Address(base, JSObject::butterflyOffset()), scratch); |
820 | neg32(offset); |
821 | signExtend32ToPtr(offset, offset); |
822 | load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result); |
823 | break; |
824 | } |
825 | case GlobalVar: |
826 | case GlobalVarWithVarInjectionChecks: |
827 | case GlobalLexicalVar: |
828 | case GlobalLexicalVarWithVarInjectionChecks: |
829 | emitVarInjectionCheck(needsVarInjectionChecks(resolveType)); |
830 | if (indirectLoadForOperand) |
831 | emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT0); |
832 | else |
833 | emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT0); |
834 | if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) // TDZ check. |
835 | addSlowCase(branchIfEmpty(regT0)); |
836 | break; |
837 | case ClosureVar: |
838 | case ClosureVarWithVarInjectionChecks: |
839 | emitVarInjectionCheck(needsVarInjectionChecks(resolveType)); |
840 | emitGetClosureVar(scope, *operandSlot); |
841 | break; |
842 | case Dynamic: |
843 | addSlowCase(jump()); |
844 | break; |
845 | case LocalClosureVar: |
846 | case ModuleVar: |
847 | case UnresolvedProperty: |
848 | case UnresolvedPropertyWithVarInjectionChecks: |
849 | RELEASE_ASSERT_NOT_REACHED(); |
850 | } |
851 | }; |
852 | |
853 | switch (resolveType) { |
854 | case GlobalProperty: |
855 | case GlobalPropertyWithVarInjectionChecks: { |
856 | JumpList skipToEnd; |
857 | load32(&metadata.m_getPutInfo, regT0); |
858 | and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0 |
859 | |
860 | Jump isNotGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(resolveType)); |
861 | emitCode(resolveType, false); |
862 | skipToEnd.append(jump()); |
863 | |
864 | isNotGlobalProperty.link(this); |
865 | emitCode(needsVarInjectionChecks(resolveType) ? GlobalLexicalVarWithVarInjectionChecks : GlobalLexicalVar, true); |
866 | |
867 | skipToEnd.link(this); |
868 | break; |
869 | } |
870 | case UnresolvedProperty: |
871 | case UnresolvedPropertyWithVarInjectionChecks: { |
872 | JumpList skipToEnd; |
873 | load32(&metadata.m_getPutInfo, regT0); |
874 | and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0 |
875 | |
876 | Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty)); |
877 | Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks)); |
878 | isGlobalProperty.link(this); |
879 | emitCode(GlobalProperty, false); |
880 | skipToEnd.append(jump()); |
881 | notGlobalPropertyWithVarInjections.link(this); |
882 | |
883 | Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar)); |
884 | emitCode(GlobalLexicalVar, true); |
885 | skipToEnd.append(jump()); |
886 | notGlobalLexicalVar.link(this); |
887 | |
888 | Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks)); |
889 | emitCode(GlobalLexicalVarWithVarInjectionChecks, true); |
890 | skipToEnd.append(jump()); |
891 | notGlobalLexicalVarWithVarInjections.link(this); |
892 | |
893 | addSlowCase(jump()); |
894 | |
895 | skipToEnd.link(this); |
896 | break; |
897 | } |
898 | |
899 | default: |
900 | emitCode(resolveType, false); |
901 | break; |
902 | } |
903 | emitPutVirtualRegister(dst); |
904 | emitValueProfilingSite(metadata); |
905 | } |
906 | |
907 | void JIT::emitSlow_op_get_from_scope(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
908 | { |
909 | linkAllSlowCases(iter); |
910 | |
911 | auto bytecode = currentInstruction->as<OpGetFromScope>(); |
912 | int dst = bytecode.m_dst.offset(); |
913 | callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetFromScope, dst, TrustedImmPtr(m_codeBlock->globalObject()), currentInstruction); |
914 | } |
915 | |
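// Stores to global and lexical variables must honor the variable's WatchpointSet: unless
// the set is already invalidated, the write takes the slow path so the watchpoint can be
// fired before the store lands.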
916 | void JIT::emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet* set) |
917 | { |
918 | emitGetVirtualRegister(value, regT0); |
919 | emitNotifyWrite(set); |
920 | storePtr(regT0, operand); |
921 | } |
922 | void JIT::emitPutGlobalVariableIndirect(JSValue** addressOfOperand, int value, WatchpointSet** indirectWatchpointSet) |
923 | { |
924 | emitGetVirtualRegister(value, regT0); |
925 | loadPtr(indirectWatchpointSet, regT1); |
926 | emitNotifyWrite(regT1); |
927 | loadPtr(addressOfOperand, regT1); |
928 | storePtr(regT0, regT1); |
929 | } |
930 | |
931 | void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet* set) |
932 | { |
933 | emitGetVirtualRegister(value, regT1); |
934 | emitGetVirtualRegister(scope, regT0); |
935 | emitNotifyWrite(set); |
936 | storePtr(regT1, Address(regT0, JSLexicalEnvironment::offsetOfVariables() + operand * sizeof(Register))); |
937 | } |
938 | |
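// put_to_scope mirrors get_from_scope: GlobalProperty stores go through the
// structure-checked butterfly path, variable stores notify watchpoints (with a TDZ check
// for non-initializing writes to lexical bindings), and every store emits the
// appropriate write barrier.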
939 | void JIT::emit_op_put_to_scope(const Instruction* currentInstruction) |
940 | { |
941 | auto bytecode = currentInstruction->as<OpPutToScope>(); |
942 | auto& metadata = bytecode.metadata(m_codeBlock); |
943 | int scope = bytecode.m_scope.offset(); |
944 | int value = bytecode.m_value.offset(); |
945 | GetPutInfo getPutInfo = copiedGetPutInfo(bytecode); |
946 | ResolveType resolveType = getPutInfo.resolveType(); |
947 | Structure** structureSlot = metadata.m_structure.slot(); |
948 | uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&metadata.m_operand); |
949 | |
950 | auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) { |
951 | switch (resolveType) { |
952 | case GlobalProperty: |
953 | case GlobalPropertyWithVarInjectionChecks: { |
            emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection, since we don't cache structures for anything but the GlobalObject. Additionally, resolve_scope handles checking for var injection.
955 | emitGetVirtualRegister(value, regT2); |
956 | |
957 | jitAssert(scopedLambda<Jump(void)>([&] () -> Jump { |
958 | return branchPtr(Equal, regT0, TrustedImmPtr(m_codeBlock->globalObject())); |
959 | })); |
960 | |
961 | loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0); |
962 | loadPtr(operandSlot, regT1); |
963 | negPtr(regT1); |
964 | storePtr(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue))); |
965 | emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue); |
966 | break; |
967 | } |
968 | case GlobalVar: |
969 | case GlobalVarWithVarInjectionChecks: |
970 | case GlobalLexicalVar: |
971 | case GlobalLexicalVarWithVarInjectionChecks: { |
972 | JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock); |
973 | RELEASE_ASSERT(constantScope); |
974 | emitVarInjectionCheck(needsVarInjectionChecks(resolveType)); |
975 | if (!isInitialization(getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) { |
976 | // We need to do a TDZ check here because we can't always prove we need to emit TDZ checks statically. |
977 | if (indirectLoadForOperand) |
978 | emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT0); |
979 | else |
980 | emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT0); |
981 | addSlowCase(branchIfEmpty(regT0)); |
982 | } |
983 | if (indirectLoadForOperand) |
984 | emitPutGlobalVariableIndirect(bitwise_cast<JSValue**>(operandSlot), value, &metadata.m_watchpointSet); |
985 | else |
986 | emitPutGlobalVariable(bitwise_cast<JSValue*>(*operandSlot), value, metadata.m_watchpointSet); |
987 | emitWriteBarrier(constantScope, value, ShouldFilterValue); |
988 | break; |
989 | } |
990 | case LocalClosureVar: |
991 | case ClosureVar: |
992 | case ClosureVarWithVarInjectionChecks: |
993 | emitVarInjectionCheck(needsVarInjectionChecks(resolveType)); |
994 | emitPutClosureVar(scope, *operandSlot, value, metadata.m_watchpointSet); |
995 | emitWriteBarrier(scope, value, ShouldFilterValue); |
996 | break; |
997 | case ModuleVar: |
998 | case Dynamic: |
999 | addSlowCase(jump()); |
1000 | break; |
1001 | case UnresolvedProperty: |
1002 | case UnresolvedPropertyWithVarInjectionChecks: |
1003 | RELEASE_ASSERT_NOT_REACHED(); |
1004 | break; |
1005 | } |
1006 | }; |
1007 | |
1008 | switch (resolveType) { |
1009 | case GlobalProperty: |
1010 | case GlobalPropertyWithVarInjectionChecks: { |
1011 | JumpList skipToEnd; |
1012 | load32(&metadata.m_getPutInfo, regT0); |
1013 | and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0 |
1014 | |
1015 | Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(resolveType)); |
1016 | Jump isGlobalLexicalVar = branch32(Equal, regT0, TrustedImm32(needsVarInjectionChecks(resolveType) ? GlobalLexicalVarWithVarInjectionChecks : GlobalLexicalVar)); |
        addSlowCase(jump()); // Dynamic; this can happen if we attempt to put a value to an already-initialized const binding.
1018 | |
1019 | isGlobalLexicalVar.link(this); |
1020 | emitCode(needsVarInjectionChecks(resolveType) ? GlobalLexicalVarWithVarInjectionChecks : GlobalLexicalVar, true); |
1021 | skipToEnd.append(jump()); |
1022 | |
1023 | isGlobalProperty.link(this); |
1024 | emitCode(resolveType, false); |
1025 | skipToEnd.link(this); |
1026 | break; |
1027 | } |
1028 | case UnresolvedProperty: |
1029 | case UnresolvedPropertyWithVarInjectionChecks: { |
1030 | JumpList skipToEnd; |
1031 | load32(&metadata.m_getPutInfo, regT0); |
1032 | and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0 |
1033 | |
1034 | Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty)); |
1035 | Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks)); |
1036 | isGlobalProperty.link(this); |
1037 | emitCode(GlobalProperty, false); |
1038 | skipToEnd.append(jump()); |
1039 | notGlobalPropertyWithVarInjections.link(this); |
1040 | |
1041 | Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar)); |
1042 | emitCode(GlobalLexicalVar, true); |
1043 | skipToEnd.append(jump()); |
1044 | notGlobalLexicalVar.link(this); |
1045 | |
1046 | Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks)); |
1047 | emitCode(GlobalLexicalVarWithVarInjectionChecks, true); |
1048 | skipToEnd.append(jump()); |
1049 | notGlobalLexicalVarWithVarInjections.link(this); |
1050 | |
1051 | addSlowCase(jump()); |
1052 | |
1053 | skipToEnd.link(this); |
1054 | break; |
1055 | } |
1056 | |
1057 | default: |
1058 | emitCode(resolveType, false); |
1059 | break; |
1060 | } |
1061 | } |
1062 | |
1063 | void JIT::emitSlow_op_put_to_scope(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
1064 | { |
1065 | linkAllSlowCases(iter); |
1066 | |
1067 | auto bytecode = currentInstruction->as<OpPutToScope>(); |
1068 | ResolveType resolveType = copiedGetPutInfo(bytecode).resolveType(); |
1069 | if (resolveType == ModuleVar) { |
1070 | JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_strict_mode_readonly_property_write_error); |
1071 | slowPathCall.call(); |
1072 | } else |
1073 | callOperation(operationPutToScope, TrustedImmPtr(m_codeBlock->globalObject()), currentInstruction); |
1074 | } |
1075 | |
1076 | void JIT::emit_op_get_from_arguments(const Instruction* currentInstruction) |
1077 | { |
1078 | auto bytecode = currentInstruction->as<OpGetFromArguments>(); |
1079 | int dst = bytecode.m_dst.offset(); |
1080 | int arguments = bytecode.m_arguments.offset(); |
1081 | int index = bytecode.m_index; |
1082 | |
1083 | emitGetVirtualRegister(arguments, regT0); |
1084 | load64(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)), regT0); |
1085 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
1086 | emitPutVirtualRegister(dst); |
1087 | } |
1088 | |
1089 | void JIT::emit_op_put_to_arguments(const Instruction* currentInstruction) |
1090 | { |
1091 | auto bytecode = currentInstruction->as<OpPutToArguments>(); |
1092 | int arguments = bytecode.m_arguments.offset(); |
1093 | int index = bytecode.m_index; |
1094 | int value = bytecode.m_value.offset(); |
1095 | |
1096 | emitGetVirtualRegister(arguments, regT0); |
1097 | emitGetVirtualRegister(value, regT1); |
1098 | store64(regT1, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>))); |
1099 | |
1100 | emitWriteBarrier(arguments, value, ShouldFilterValue); |
1101 | } |
1102 | |
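// Write barriers: when a filtered operand is not a cell, the barrier is skipped
// entirely; otherwise we test the owner's cell state and only call the slow path for
// owners that are neither remembered nor in Eden.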
1103 | void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode) |
1104 | { |
1105 | Jump valueNotCell; |
1106 | if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) { |
1107 | emitGetVirtualRegister(value, regT0); |
1108 | valueNotCell = branchIfNotCell(regT0); |
1109 | } |
1110 | |
1111 | emitGetVirtualRegister(owner, regT0); |
1112 | Jump ownerNotCell; |
1113 | if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase) |
1114 | ownerNotCell = branchIfNotCell(regT0); |
1115 | |
1116 | Jump ownerIsRememberedOrInEden = barrierBranch(vm(), regT0, regT1); |
1117 | callOperation(operationWriteBarrierSlowPath, &vm(), regT0); |
1118 | ownerIsRememberedOrInEden.link(this); |
1119 | |
1120 | if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase) |
1121 | ownerNotCell.link(this); |
1122 | if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) |
1123 | valueNotCell.link(this); |
1124 | } |
1125 | |
1126 | void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode) |
1127 | { |
1128 | emitGetVirtualRegister(value, regT0); |
1129 | Jump valueNotCell; |
1130 | if (mode == ShouldFilterValue) |
1131 | valueNotCell = branchIfNotCell(regT0); |
1132 | |
1133 | emitWriteBarrier(owner); |
1134 | |
1135 | if (mode == ShouldFilterValue) |
1136 | valueNotCell.link(this); |
1137 | } |
1138 | |
1139 | void JIT::emit_op_get_internal_field(const Instruction* currentInstruction) |
1140 | { |
1141 | auto bytecode = currentInstruction->as<OpGetInternalField>(); |
1142 | auto& metadata = bytecode.metadata(m_codeBlock); |
1143 | int dst = bytecode.m_dst.offset(); |
1144 | int base = bytecode.m_base.offset(); |
1145 | unsigned index = bytecode.m_index; |
1146 | |
1147 | emitGetVirtualRegister(base, regT1); |
1148 | loadPtr(Address(regT1, JSInternalFieldObjectImpl<>::offsetOfInternalField(index)), regT0); |
1149 | |
1150 | emitValueProfilingSite(metadata); |
1151 | emitPutVirtualRegister(dst); |
1152 | } |
1153 | |
1154 | void JIT::emit_op_put_internal_field(const Instruction* currentInstruction) |
1155 | { |
1156 | auto bytecode = currentInstruction->as<OpPutInternalField>(); |
1157 | int base = bytecode.m_base.offset(); |
1158 | int value = bytecode.m_value.offset(); |
1159 | unsigned index = bytecode.m_index; |
1160 | |
1161 | emitGetVirtualRegister(base, regT0); |
1162 | emitGetVirtualRegister(value, regT1); |
1163 | storePtr(regT1, Address(regT0, JSInternalFieldObjectImpl<>::offsetOfInternalField(index))); |
1164 | emitWriteBarrier(base, value, ShouldFilterValue); |
1165 | } |
1166 | |
1167 | #else // USE(JSVALUE64) |
1168 | |
1169 | void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode) |
1170 | { |
1171 | Jump valueNotCell; |
1172 | if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) { |
1173 | emitLoadTag(value, regT0); |
1174 | valueNotCell = branchIfNotCell(regT0); |
1175 | } |
1176 | |
1177 | emitLoad(owner, regT0, regT1); |
1178 | Jump ownerNotCell; |
1179 | if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue) |
1180 | ownerNotCell = branchIfNotCell(regT0); |
1181 | |
1182 | Jump ownerIsRememberedOrInEden = barrierBranch(vm(), regT1, regT2); |
1183 | callOperation(operationWriteBarrierSlowPath, &vm(), regT1); |
1184 | ownerIsRememberedOrInEden.link(this); |
1185 | |
1186 | if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue) |
1187 | ownerNotCell.link(this); |
1188 | if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) |
1189 | valueNotCell.link(this); |
1190 | } |
1191 | |
1192 | void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode) |
1193 | { |
1194 | Jump valueNotCell; |
1195 | if (mode == ShouldFilterValue) { |
1196 | emitLoadTag(value, regT0); |
1197 | valueNotCell = branchIfNotCell(regT0); |
1198 | } |
1199 | |
1200 | emitWriteBarrier(owner); |
1201 | |
1202 | if (mode == ShouldFilterValue) |
1203 | valueNotCell.link(this); |
1204 | } |
1205 | |
1206 | #endif // USE(JSVALUE64) |
1207 | |
1208 | void JIT::emitWriteBarrier(JSCell* owner) |
1209 | { |
1210 | Jump ownerIsRememberedOrInEden = barrierBranch(vm(), owner, regT0); |
1211 | callOperation(operationWriteBarrierSlowPath, &vm(), owner); |
1212 | ownerIsRememberedOrInEden.link(this); |
1213 | } |
1214 | |
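// Check that the property register still holds the cached identifier: symbols compare by
// cell identity, strings by their underlying StringImpl (unresolved ropes fail the
// comparison and take the slow path).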
1215 | void JIT::emitByValIdentifierCheck(ByValInfo* byValInfo, RegisterID cell, RegisterID scratch, const Identifier& propertyName, JumpList& slowCases) |
1216 | { |
1217 | if (propertyName.isSymbol()) |
1218 | slowCases.append(branchPtr(NotEqual, cell, TrustedImmPtr(byValInfo->cachedSymbol.get()))); |
1219 | else { |
1220 | slowCases.append(branchIfNotString(cell)); |
1221 | loadPtr(Address(cell, JSString::offsetOfValue()), scratch); |
1222 | slowCases.append(branchPtr(NotEqual, scratch, TrustedImmPtr(propertyName.impl()))); |
1223 | } |
1224 | } |
1225 | |
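// Called from the slow path once a put_by_val site settles on a single array mode:
// compile a stub specialized for that mode, point the original bad-type jump at it, and
// repatch the slow-path call to the generic operation so we stop trying to optimize.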
1226 | template<typename Op> |
1227 | void JIT::privateCompilePutByVal(const ConcurrentJSLocker&, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) |
1228 | { |
1229 | const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); |
1230 | auto bytecode = currentInstruction->as<Op>(); |
1231 | |
1232 | PatchableJump badType; |
1233 | JumpList slowCases; |
1234 | |
1235 | bool needsLinkForWriteBarrier = false; |
1236 | |
1237 | switch (arrayMode) { |
1238 | case JITInt32: |
1239 | slowCases = emitInt32PutByVal(bytecode, badType); |
1240 | break; |
1241 | case JITDouble: |
1242 | slowCases = emitDoublePutByVal(bytecode, badType); |
1243 | break; |
1244 | case JITContiguous: |
1245 | slowCases = emitContiguousPutByVal(bytecode, badType); |
1246 | needsLinkForWriteBarrier = true; |
1247 | break; |
1248 | case JITArrayStorage: |
1249 | slowCases = emitArrayStoragePutByVal(bytecode, badType); |
1250 | needsLinkForWriteBarrier = true; |
1251 | break; |
1252 | default: |
1253 | TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode); |
1254 | if (isInt(type)) |
1255 | slowCases = emitIntTypedArrayPutByVal(bytecode, badType, type); |
1256 | else |
1257 | slowCases = emitFloatTypedArrayPutByVal(bytecode, badType, type); |
1258 | break; |
1259 | } |
1260 | |
1261 | Jump done = jump(); |
1262 | |
1263 | LinkBuffer patchBuffer(*this, m_codeBlock); |
1264 | patchBuffer.link(badType, byValInfo->slowPathTarget); |
1265 | patchBuffer.link(slowCases, byValInfo->slowPathTarget); |
1266 | patchBuffer.link(done, byValInfo->badTypeDoneTarget); |
1267 | if (needsLinkForWriteBarrier) { |
1268 | ASSERT(removeCodePtrTag(m_calls.last().callee.executableAddress()) == removeCodePtrTag(operationWriteBarrierSlowPath)); |
1269 | patchBuffer.link(m_calls.last().from, m_calls.last().callee); |
1270 | } |
1271 | |
1272 | bool isDirect = currentInstruction->opcodeID() == op_put_by_val_direct; |
1273 | if (!isDirect) { |
        byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
            m_codeBlock, patchBuffer, JITStubRoutinePtrTag,
            "Baseline put_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value());
1277 | |
1278 | } else { |
        byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
            m_codeBlock, patchBuffer, JITStubRoutinePtrTag,
            "Baseline put_by_val_direct stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value());
1282 | } |
1283 | MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel<JITStubRoutinePtrTag>(byValInfo->stubRoutine->code().code())); |
1284 | MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(isDirect ? operationDirectPutByValGeneric : operationPutByValGeneric)); |
1285 | } |
1286 | // This function is only consumed from another translation unit (JITOperations.cpp), |
1287 | // so we list off the two expected specializations in advance. |
1288 | template void JIT::privateCompilePutByVal<OpPutByVal>(const ConcurrentJSLocker&, ByValInfo*, ReturnAddressPtr, JITArrayMode); |
1289 | template void JIT::privateCompilePutByVal<OpPutByValDirect>(const ConcurrentJSLocker&, ByValInfo*, ReturnAddressPtr, JITArrayMode); |
1290 | |
1291 | template<typename Op> |
1292 | void JIT::privateCompilePutByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName) |
1293 | { |
1294 | ASSERT((putKind == Direct && Op::opcodeID == op_put_by_val_direct) || (putKind == NotDirect && Op::opcodeID == op_put_by_val)); |
1295 | const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); |
1296 | auto bytecode = currentInstruction->as<Op>(); |
1297 | |
1298 | JumpList doneCases; |
1299 | JumpList slowCases; |
1300 | |
    JITPutByIdGenerator gen = emitPutByValWithCachedId(byValInfo, bytecode, putKind, propertyName, doneCases, slowCases);

    ConcurrentJSLocker locker(m_codeBlock->m_lock);
    LinkBuffer patchBuffer(*this, m_codeBlock);
    patchBuffer.link(slowCases, byValInfo->slowPathTarget);
    patchBuffer.link(doneCases, byValInfo->badTypeDoneTarget);
    if (!m_exceptionChecks.empty())
        patchBuffer.link(m_exceptionChecks, byValInfo->exceptionHandler);

    for (const auto& callSite : m_calls) {
        if (callSite.callee)
            patchBuffer.link(callSite.from, callSite.callee);
    }
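    // The stub has no separate slow-path buffer, so the same LinkBuffer is passed for
    // both the fast path and the slow path.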
    gen.finalize(patchBuffer, patchBuffer);

    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
        m_codeBlock, patchBuffer, JITStubRoutinePtrTag,
        "Baseline put_by_val%s with cached property name '%s' stub for %s, return point %p", (putKind == Direct) ? "_direct" : "", propertyName.impl()->utf8().data(), toCString(*m_codeBlock).data(), returnAddress.value());
    byValInfo->stubInfo = gen.stubInfo();

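    // This stub handles the case where the subscript is not an index, so it is entered
    // through notIndexJump rather than badTypeJump.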
    MacroAssembler::repatchJump(byValInfo->notIndexJump, CodeLocationLabel<JITStubRoutinePtrTag>(byValInfo->stubRoutine->code().code()));
    MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(putKind == Direct ? operationDirectPutByValGeneric : operationPutByValGeneric));
}

// This function is only called from another translation unit (JITOperations.cpp),
// so we explicitly instantiate the two expected specializations in advance.
template void JIT::privateCompilePutByValWithCachedId<OpPutByVal>(ByValInfo*, ReturnAddressPtr, PutKind, const Identifier&);
template void JIT::privateCompilePutByValWithCachedId<OpPutByValDirect>(ByValInfo*, ReturnAddressPtr, PutKind, const Identifier&);

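// Fast path for get_by_val on a DoubleShape butterfly. Array holes in double storage
// read back as NaN, so a NaN result defers to the slow path.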
JIT::JumpList JIT::emitDoubleLoad(const Instruction*, PatchableJump& badType)
{
#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID indexing = regT2;
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID indexing = regT1;
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, indexing, TrustedImm32(DoubleShape));
    loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
    slowCases.append(branch32(AboveOrEqual, property, Address(scratch, Butterfly::offsetOfPublicLength())));
    loadDouble(BaseIndex(scratch, property, TimesEight), fpRegT0);
    slowCases.append(branchIfNaN(fpRegT0));

    return slowCases;
}

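// Fast path for get_by_val on an Int32Shape or ContiguousShape butterfly: check the
// indexing shape, bound the index against the public length, and treat an empty
// (hole) value as a slow case.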
JIT::JumpList JIT::emitContiguousLoad(const Instruction*, PatchableJump& badType, IndexingType expectedShape)
{
#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID indexing = regT2;
    JSValueRegs result = JSValueRegs(regT0);
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID indexing = regT1;
    JSValueRegs result = JSValueRegs(regT1, regT0);
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, indexing, TrustedImm32(expectedShape));
    loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
    slowCases.append(branch32(AboveOrEqual, property, Address(scratch, Butterfly::offsetOfPublicLength())));
    loadValue(BaseIndex(scratch, property, TimesEight), result);
    slowCases.append(branchIfEmpty(result));

    return slowCases;
}

JIT::JumpList JIT::emitArrayStorageLoad(const Instruction*, PatchableJump& badType)
{
#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID indexing = regT2;
    JSValueRegs result = JSValueRegs(regT0);
    RegisterID scratch = regT3;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID indexing = regT1;
    JSValueRegs result = JSValueRegs(regT1, regT0);
    RegisterID scratch = regT3;
#endif

    JumpList slowCases;

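    // Subtracting ArrayStorageShape and doing a single unsigned comparison accepts both
    // ArrayStorageShape and SlowPutArrayStorageShape with one branch.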
    add32(TrustedImm32(-ArrayStorageShape), indexing, scratch);
    badType = patchableBranch32(Above, scratch, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));

    loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
    slowCases.append(branch32(AboveOrEqual, property, Address(scratch, ArrayStorage::vectorLengthOffset())));

    loadValue(BaseIndex(scratch, property, TimesEight, ArrayStorage::vectorOffset()), result);
    slowCases.append(branchIfEmpty(result));

    return slowCases;
}

template<typename Op>
JIT::JumpList JIT::emitIntTypedArrayPutByVal(Op bytecode, PatchableJump& badType, TypedArrayType type)
{
    auto& metadata = bytecode.metadata(m_codeBlock);
    ArrayProfile* profile = &metadata.m_arrayProfile;
    ASSERT(isInt(type));

    int value = bytecode.m_value.offset();

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT2;
    RegisterID lateScratch2 = regT4;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT1;
    RegisterID lateScratch2 = regT4;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
    badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
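    // Record out-of-bounds stores in the array profile before taking the slow path, so
    // later tiers can compile the out-of-bounds form directly.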
    load32(Address(base, JSArrayBufferView::offsetOfLength()), lateScratch2);
    Jump inBounds = branch32(Below, property, lateScratch2);
    emitArrayProfileOutOfBoundsSpecialCase(profile);
    slowCases.append(jump());
    inBounds.link(this);

#if USE(JSVALUE64)
    emitGetVirtualRegister(value, earlyScratch);
    slowCases.append(branchIfNotInt32(earlyScratch));
#else
    emitLoad(value, lateScratch, earlyScratch);
    slowCases.append(branchIfNotInt32(lateScratch));
#endif

    // We would be loading this into base as in get_by_val, except that the slow
    // path expects the base to be unclobbered.
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch);
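    // The vector lives in the primitive Gigacage; conditionally caging the pointer keeps
    // a corrupted vector from pointing outside that region.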
    cageConditionally(Gigacage::Primitive, lateScratch, lateScratch2, lateScratch2);

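    // Uint8ClampedArray clamps rather than truncates: negative values become 0 and
    // values above 255 become 255.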
    if (isClamped(type)) {
        ASSERT(elementSize(type) == 1);
        ASSERT(!JSC::isSigned(type));
        Jump inBounds = branch32(BelowOrEqual, earlyScratch, TrustedImm32(0xff));
        Jump tooBig = branch32(GreaterThan, earlyScratch, TrustedImm32(0xff));
        xor32(earlyScratch, earlyScratch);
        Jump clamped = jump();
        tooBig.link(this);
        move(TrustedImm32(0xff), earlyScratch);
        clamped.link(this);
        inBounds.link(this);
    }

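    // Store the low bits at the element width; for non-clamped int typed arrays the
    // specified conversion is the int32 value modulo 2^width, which truncation implements.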
    switch (elementSize(type)) {
    case 1:
        store8(earlyScratch, BaseIndex(lateScratch, property, TimesOne));
        break;
    case 2:
        store16(earlyScratch, BaseIndex(lateScratch, property, TimesTwo));
        break;
    case 4:
        store32(earlyScratch, BaseIndex(lateScratch, property, TimesFour));
        break;
    default:
        CRASH();
    }

    return slowCases;
}

template<typename Op>
JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Op bytecode, PatchableJump& badType, TypedArrayType type)
{
    auto& metadata = bytecode.metadata(m_codeBlock);
    ArrayProfile* profile = &metadata.m_arrayProfile;
    ASSERT(isFloat(type));

    int value = bytecode.m_value.offset();

#if USE(JSVALUE64)
    RegisterID base = regT0;
    RegisterID property = regT1;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT2;
    RegisterID lateScratch2 = regT4;
#else
    RegisterID base = regT0;
    RegisterID property = regT2;
    RegisterID earlyScratch = regT3;
    RegisterID lateScratch = regT1;
    RegisterID lateScratch2 = regT4;
#endif

    JumpList slowCases;

    load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
    badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
    load32(Address(base, JSArrayBufferView::offsetOfLength()), lateScratch2);
    Jump inBounds = branch32(Below, property, lateScratch2);
    emitArrayProfileOutOfBoundsSpecialCase(profile);
    slowCases.append(jump());
    inBounds.link(this);

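    // Unbox the value into fpRegT0: an int32 converts directly; anything else must be a
    // number, whose IEEE double bits are recovered from the boxed representation.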
#if USE(JSVALUE64)
    emitGetVirtualRegister(value, earlyScratch);
    Jump doubleCase = branchIfNotInt32(earlyScratch);
    convertInt32ToDouble(earlyScratch, fpRegT0);
    Jump ready = jump();
    doubleCase.link(this);
    slowCases.append(branchIfNotNumber(earlyScratch));
    add64(numberTagRegister, earlyScratch);
    move64ToDouble(earlyScratch, fpRegT0);
    ready.link(this);
#else
    emitLoad(value, lateScratch, earlyScratch);
    Jump doubleCase = branchIfNotInt32(lateScratch);
    convertInt32ToDouble(earlyScratch, fpRegT0);
    Jump ready = jump();
    doubleCase.link(this);
    slowCases.append(branch32(Above, lateScratch, TrustedImm32(JSValue::LowestTag)));
    moveIntsToDouble(earlyScratch, lateScratch, fpRegT0, fpRegT1);
    ready.link(this);
#endif

    // We would be loading this into base as in get_by_val, except that the slow
    // path expects the base to be unclobbered.
    loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch);
    cageConditionally(Gigacage::Primitive, lateScratch, lateScratch2, lateScratch2);

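    // Store at the element width; Float32 arrays narrow the double to a float first.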
    switch (elementSize(type)) {
    case 4:
        convertDoubleToFloat(fpRegT0, fpRegT0);
        storeFloat(fpRegT0, BaseIndex(lateScratch, property, TimesFour));
        break;
    case 8:
        storeDouble(fpRegT0, BaseIndex(lateScratch, property, TimesEight));
        break;
    default:
        CRASH();
    }

    return slowCases;
}

template void JIT::emit_op_put_by_val<OpPutByVal>(const Instruction*);

} // namespace JSC

#endif // ENABLE(JIT)