1 | /* |
2 | * Copyright (C) 2008-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | |
28 | #if ENABLE(JIT) |
29 | #if USE(JSVALUE32_64) |
30 | #include "JIT.h" |
31 | |
32 | #include "CodeBlock.h" |
33 | #include "DirectArguments.h" |
34 | #include "GCAwareJITStubRoutine.h" |
35 | #include "InterpreterInlines.h" |
36 | #include "JITInlines.h" |
37 | #include "JSArray.h" |
38 | #include "JSFunction.h" |
39 | #include "JSLexicalEnvironment.h" |
40 | #include "LinkBuffer.h" |
41 | #include "OpcodeInlines.h" |
42 | #include "ResultType.h" |
43 | #include "SlowPathCall.h" |
44 | #include "StructureStubInfo.h" |
45 | #include <wtf/StringPrintStream.h> |
46 | |
47 | |
48 | namespace JSC { |
49 | |
50 | void JIT::emit_op_put_getter_by_id(const Instruction* currentInstruction) |
51 | { |
52 | auto bytecode = currentInstruction->as<OpPutGetterById>(); |
53 | int base = bytecode.m_base.offset(); |
54 | int property = bytecode.m_property; |
55 | int options = bytecode.m_attributes; |
56 | int getter = bytecode.m_accessor.offset(); |
57 | |
58 | emitLoadPayload(base, regT1); |
59 | emitLoadPayload(getter, regT3); |
60 | callOperation(operationPutGetterById, regT1, m_codeBlock->identifier(property).impl(), options, regT3); |
61 | } |
62 | |
63 | void JIT::emit_op_put_setter_by_id(const Instruction* currentInstruction) |
64 | { |
65 | auto bytecode = currentInstruction->as<OpPutSetterById>(); |
66 | int base = bytecode.m_base.offset(); |
67 | int property = bytecode.m_property; |
68 | int options = bytecode.m_attributes; |
69 | int setter = bytecode.m_accessor.offset(); |
70 | |
71 | emitLoadPayload(base, regT1); |
72 | emitLoadPayload(setter, regT3); |
73 | callOperation(operationPutSetterById, regT1, m_codeBlock->identifier(property).impl(), options, regT3); |
74 | } |
75 | |
76 | void JIT::emit_op_put_getter_setter_by_id(const Instruction* currentInstruction) |
77 | { |
78 | auto bytecode = currentInstruction->as<OpPutGetterSetterById>(); |
79 | int base = bytecode.m_base.offset(); |
80 | int property = bytecode.m_property; |
81 | int attributes = bytecode.m_attributes; |
82 | int getter = bytecode.m_getter.offset(); |
83 | int setter = bytecode.m_setter.offset(); |
84 | |
85 | emitLoadPayload(base, regT1); |
86 | emitLoadPayload(getter, regT3); |
87 | emitLoadPayload(setter, regT4); |
88 | callOperation(operationPutGetterSetter, regT1, m_codeBlock->identifier(property).impl(), attributes, regT3, regT4); |
89 | } |
90 | |
91 | void JIT::emit_op_put_getter_by_val(const Instruction* currentInstruction) |
92 | { |
93 | auto bytecode = currentInstruction->as<OpPutGetterByVal>(); |
94 | int base = bytecode.m_base.offset(); |
95 | int property = bytecode.m_property.offset(); |
96 | int32_t attributes = bytecode.m_attributes; |
97 | int getter = bytecode.m_accessor.offset(); |
98 | |
99 | emitLoadPayload(base, regT2); |
100 | emitLoad(property, regT1, regT0); |
101 | emitLoadPayload(getter, regT3); |
102 | callOperation(operationPutGetterByVal, regT2, JSValueRegs(regT1, regT0), attributes, regT3); |
103 | } |
104 | |
105 | void JIT::emit_op_put_setter_by_val(const Instruction* currentInstruction) |
106 | { |
107 | auto bytecode = currentInstruction->as<OpPutSetterByVal>(); |
108 | int base = bytecode.m_base.offset(); |
109 | int property = bytecode.m_property.offset(); |
110 | int32_t attributes = bytecode.m_attributes; |
111 | int setter = bytecode.m_accessor.offset(); |
112 | |
113 | emitLoadPayload(base, regT2); |
114 | emitLoad(property, regT1, regT0); |
115 | emitLoadPayload(setter, regT3); |
116 | callOperation(operationPutSetterByVal, regT2, JSValueRegs(regT1, regT0), attributes, regT3); |
117 | } |
118 | |
119 | void JIT::emit_op_del_by_id(const Instruction* currentInstruction) |
120 | { |
121 | auto bytecode = currentInstruction->as<OpDelById>(); |
122 | int dst = bytecode.m_dst.offset(); |
123 | int base = bytecode.m_base.offset(); |
124 | int property = bytecode.m_property; |
125 | emitLoad(base, regT1, regT0); |
126 | callOperation(operationDeleteByIdJSResult, dst, JSValueRegs(regT1, regT0), m_codeBlock->identifier(property).impl()); |
127 | } |
128 | |
129 | void JIT::emit_op_del_by_val(const Instruction* currentInstruction) |
130 | { |
131 | auto bytecode = currentInstruction->as<OpDelByVal>(); |
132 | int dst = bytecode.m_dst.offset(); |
133 | int base = bytecode.m_base.offset(); |
134 | int property = bytecode.m_property.offset(); |
135 | emitLoad2(base, regT1, regT0, property, regT3, regT2); |
136 | callOperation(operationDeleteByValJSResult, dst, JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2)); |
137 | } |
138 | |
void JIT::emit_op_get_by_val(const Instruction* currentInstruction)
{
    // Inline fast path for indexed reads (dst = base[property]). Only a cell
    // base with an int32 property is handled here, and only for the single
    // indexing shape the array profile predicts; every guard is registered
    // as a slow case. The patchable jumps and labels are recorded in
    // ByValCompilationInfo so the ByVal IC can repatch this site later.
    auto bytecode = currentInstruction->as<OpGetByVal>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int dst = bytecode.m_dst.offset();
    int base = bytecode.m_base.offset();
    int property = bytecode.m_property.offset();
    ArrayProfile* profile = &metadata.m_arrayProfile;
    ByValInfo* byValInfo = m_codeBlock->addByValInfo();

    // base -> tag:regT1 / payload:regT0, property -> tag:regT3 / payload:regT2.
    emitLoad2(base, regT1, regT0, property, regT3, regT2);

    emitJumpSlowCaseIfNotJSCell(base, regT1);
    // Patchable so the IC can later retarget non-int32 keys (e.g. cached ids).
    PatchableJump notIndex = patchableBranch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(notIndex);
    emitArrayProfilingSiteWithCell(regT0, regT1, profile);
    and32(TrustedImm32(IndexingShapeMask), regT1);

    PatchableJump badType;
    JumpList slowCases;

    // Emit the load for the one shape the profile predicts; any other shape
    // takes the patchable badType jump to the slow path.
    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
        slowCases = emitInt32GetByVal(currentInstruction, badType);
        break;
    case JITDouble:
        slowCases = emitDoubleGetByVal(currentInstruction, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousGetByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
        break;
    default:
        CRASH();
    }

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

    // Debug-only check: the fast path must never produce the empty value
    // (holes are supposed to have branched to a slow case above).
    if (!ASSERT_DISABLED) {
        Jump resultOK = branchIfNotEmpty(regT1);
        abortWithReason(JITGetByValResultIsNotEmpty);
        resultOK.link(this);
    }

    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    emitStore(dst, regT1, regT0);

    Label nextHotPath = label();

    m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath));
}
196 | |
JITGetByIdGenerator JIT::emitGetByValWithCachedId(ByValInfo* byValInfo, OpGetByVal bytecode, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases)
{
    // Regenerated code for a get_by_val site that keeps seeing the same
    // identifier key: guard that the property is exactly 'propertyName',
    // then run a get_by_id-style inline cache against the base.
    // base: tag(regT1), payload(regT0)
    // property: tag(regT3), payload(regT2)
    // scratch: regT4

    int dst = bytecode.m_dst.offset();

    // The property must be a cell matching the cached identifier.
    slowCases.append(branchIfNotCell(regT3));
    emitByValIdentifierCheck(byValInfo, regT2, regT4, propertyName, slowCases);

    const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr();
    JITGetByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
        propertyName.impl(), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), AccessType::Get);
    gen.generateFastPath(*this);

    fastDoneCase = jump();

    // IC miss: fall back to the generic optimizing get_by_id operation.
    Label coldPathBegin = label();
    gen.slowPathJump().link(this);

    Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdOptimize, dst, gen.stubInfo(), JSValueRegs(regT1, regT0), propertyName.impl());
    gen.reportSlowPathCall(coldPathBegin, call);
    slowDoneCase = jump();

    return gen;
}
225 | |
void JIT::emitSlow_op_get_by_val(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Slow path for emit_op_get_by_val. The linkSlowCase calls below must
    // consume slow cases in exactly the order the fast path added them.
    auto bytecode = currentInstruction->as<OpGetByVal>();
    int dst = bytecode.m_dst.offset();
    int base = bytecode.m_base.offset();
    int property = bytecode.m_property.offset();
    ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;

    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // property int32 check

    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    // If the base cell is a string, try the string[index] CTI stub before
    // calling the generic operation.
    Jump notString = branchIfNotString(regT0);
    emitNakedCall(CodeLocationLabel<NoPtrTag>(m_vm->getCTIStub(stringGetByValGenerator).retaggedCode<NoPtrTag>()));
    Jump failed = branchTestPtr(Zero, regT0); // stub returns null payload on failure
    emitStoreCell(dst, regT0);
    emitJumpSlowToHot(jump(), currentInstruction->size());
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    Label slowPath = label();

    // Reload both operands (registers were clobbered above) and call out.
    emitLoad(base, regT1, regT0);
    emitLoad(property, regT3, regT2);
    Call call = callOperation(operationGetByValOptimize, dst, JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2), byValInfo);

    // Record the generic slow-path entry and the call to repatch when the
    // ByVal IC regenerates a stub for this site.
    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;

    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
}
263 | |
void JIT::emit_op_put_by_val_direct(const Instruction* currentInstruction)
{
    // put_by_val_direct shares the put_by_val codegen; the opcode template
    // argument selects the direct variant's bytecode struct and metadata.
    emit_op_put_by_val<OpPutByValDirect>(currentInstruction);
}
268 | |
template<typename Op>
void JIT::emit_op_put_by_val(const Instruction* currentInstruction)
{
    // Shared fast path for op_put_by_val and op_put_by_val_direct (Op selects
    // the bytecode struct). Handles a cell base with an int32 index for the
    // single indexing shape the array profile predicts; all guards become
    // slow cases, and the site is recorded in ByValCompilationInfo so the
    // ByVal IC can repatch it later.
    auto bytecode = currentInstruction->as<Op>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int base = bytecode.m_base.offset();
    int property = bytecode.m_property.offset();
    ArrayProfile* profile = &metadata.m_arrayProfile;
    ByValInfo* byValInfo = m_codeBlock->addByValInfo();

    // base -> tag:regT1 / payload:regT0, property -> tag:regT3 / payload:regT2.
    emitLoad2(base, regT1, regT0, property, regT3, regT2);

    emitJumpSlowCaseIfNotJSCell(base, regT1);
    PatchableJump notIndex = patchableBranch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(notIndex);
    emitArrayProfilingSiteWithCell(regT0, regT1, profile);

    PatchableJump badType;
    JumpList slowCases;

    // Writes to copy-on-write arrays must take the slow path.
    // FIXME: Maybe we should do this inline?
    addSlowCase(branchTest32(NonZero, regT1, TrustedImm32(CopyOnWrite)));
    and32(TrustedImm32(IndexingShapeMask), regT1);

    // Emit the store for the one shape the profile predicts; any other shape
    // takes the patchable badType jump.
    JITArrayMode mode = chooseArrayMode(profile);
    switch (mode) {
    case JITInt32:
        slowCases = emitInt32PutByVal(bytecode, badType);
        break;
    case JITDouble:
        slowCases = emitDoublePutByVal(bytecode, badType);
        break;
    case JITContiguous:
        slowCases = emitContiguousPutByVal(bytecode, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStoragePutByVal(bytecode, badType);
        break;
    default:
        CRASH();
        break;
    }

    addSlowCase(badType);
    addSlowCase(slowCases);

    Label done = label();

    m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, done));
}
319 | |
320 | template <typename Op> |
321 | JIT::JumpList JIT::emitGenericContiguousPutByVal(Op bytecode, PatchableJump& badType, IndexingType indexingShape) |
322 | { |
323 | auto& metadata = bytecode.metadata(m_codeBlock); |
324 | int base = bytecode.m_base.offset(); |
325 | int value = bytecode.m_value.offset(); |
326 | ArrayProfile* profile = &metadata.m_arrayProfile; |
327 | |
328 | JumpList slowCases; |
329 | |
330 | badType = patchableBranch32(NotEqual, regT1, TrustedImm32(ContiguousShape)); |
331 | |
332 | loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3); |
333 | Jump outOfBounds = branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())); |
334 | |
335 | Label storeResult = label(); |
336 | emitLoad(value, regT1, regT0); |
337 | switch (indexingShape) { |
338 | case Int32Shape: |
339 | slowCases.append(branchIfNotInt32(regT1)); |
340 | store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); |
341 | store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); |
342 | break; |
343 | case ContiguousShape: |
344 | store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); |
345 | store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); |
346 | emitLoad(base, regT2, regT3); |
347 | emitWriteBarrier(base, value, ShouldFilterValue); |
348 | break; |
349 | case DoubleShape: { |
350 | Jump notInt = branchIfNotInt32(regT1); |
351 | convertInt32ToDouble(regT0, fpRegT0); |
352 | Jump ready = jump(); |
353 | notInt.link(this); |
354 | moveIntsToDouble(regT0, regT1, fpRegT0, fpRegT1); |
355 | slowCases.append(branchIfNaN(fpRegT0)); |
356 | ready.link(this); |
357 | storeDouble(fpRegT0, BaseIndex(regT3, regT2, TimesEight)); |
358 | break; |
359 | } |
360 | default: |
361 | CRASH(); |
362 | break; |
363 | } |
364 | |
365 | Jump done = jump(); |
366 | |
367 | outOfBounds.link(this); |
368 | slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfVectorLength()))); |
369 | |
370 | emitArrayProfileStoreToHoleSpecialCase(profile); |
371 | |
372 | add32(TrustedImm32(1), regT2, regT1); |
373 | store32(regT1, Address(regT3, Butterfly::offsetOfPublicLength())); |
374 | jump().linkTo(storeResult, this); |
375 | |
376 | done.link(this); |
377 | |
378 | return slowCases; |
379 | } |
380 | |
template <typename Op>
JIT::JumpList JIT::emitArrayStoragePutByVal(Op bytecode, PatchableJump& badType)
{
    // ArrayStorage put_by_val fast path. On entry: base payload in regT0,
    // indexing shape bits in regT1, int32 index in regT2.
    auto& metadata = bytecode.metadata(m_codeBlock);
    int base = bytecode.m_base.offset();
    int value = bytecode.m_value.offset();
    ArrayProfile* profile = &metadata.m_arrayProfile;

    JumpList slowCases;

    badType = patchableBranch32(NotEqual, regT1, TrustedImm32(ArrayStorageShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
    // Writes beyond the vector length always go to the slow path.
    slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, ArrayStorage::vectorLengthOffset())));

    // An empty tag at the slot means we are filling a hole.
    Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));

    Label storeResult(this);
    emitLoad(value, regT1, regT0);
    store32(regT0, BaseIndex(regT3, regT2, TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); // payload
    store32(regT1, BaseIndex(regT3, regT2, TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); // tag
    Jump end = jump();

    // Hole fill: bump m_numValuesInVector, and if the index is at or past
    // the current length, grow length to index + 1 before storing.
    empty.link(this);
    emitArrayProfileStoreToHoleSpecialCase(profile);
    add32(TrustedImm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
    branch32(Below, regT2, Address(regT3, ArrayStorage::lengthOffset())).linkTo(storeResult, this);

    add32(TrustedImm32(1), regT2, regT0);
    store32(regT0, Address(regT3, ArrayStorage::lengthOffset()));
    jump().linkTo(storeResult, this);

    end.link(this);

    emitWriteBarrier(base, value, ShouldFilterValue);

    return slowCases;
}
419 | |
template <typename Op>
JITPutByIdGenerator JIT::emitPutByValWithCachedId(ByValInfo* byValInfo, Op bytecode, PutKind putKind, const Identifier& propertyName, JumpList& doneCases, JumpList& slowCases)
{
    // Regenerated code for a put_by_val site that keeps seeing the same
    // identifier key: guard that the property is exactly 'propertyName',
    // then run a put_by_id-style inline cache.
    // base: tag(regT1), payload(regT0)
    // property: tag(regT3), payload(regT2)

    int base = bytecode.m_base.offset();
    int value = bytecode.m_value.offset();

    // The property must be a cell matching the cached identifier.
    slowCases.append(branchIfNotCell(regT3));
    emitByValIdentifierCheck(byValInfo, regT2, regT2, propertyName, slowCases);

    // Write barrier breaks the registers. So after issuing the write barrier,
    // reload the registers.
    emitWriteBarrier(base, value, ShouldFilterBase);
    emitLoadPayload(base, regT0);
    emitLoad(value, regT3, regT2);

    const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr();
    JITPutByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
        JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2), regT1, m_codeBlock->ecmaMode(), putKind);
    gen.generateFastPath(*this);
    doneCases.append(jump());

    Label coldPathBegin = label();
    gen.slowPathJump().link(this);

    // JITPutByIdGenerator only preserve the value and the base's payload, we have to reload the tag.
    emitLoadTag(base, regT1);

    Call call = callOperation(gen.slowPathFunction(), gen.stubInfo(), JSValueRegs(regT3, regT2), JSValueRegs(regT1, regT0), propertyName.impl());
    gen.reportSlowPathCall(coldPathBegin, call);
    doneCases.append(jump());

    return gen;
}
457 | |
void JIT::emitSlow_op_put_by_val(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Shared slow path for op_put_by_val and op_put_by_val_direct; the opcode
    // id distinguishes which bytecode struct to decode and which runtime
    // operation to call.
    bool isDirect = currentInstruction->opcodeID() == op_put_by_val_direct;
    int base;
    int property;
    int value;

    // Both bytecode structs expose the same three operands.
    auto load = [&](auto bytecode) {
        base = bytecode.m_base.offset();
        property = bytecode.m_property.offset();
        value = bytecode.m_value.offset();
    };

    if (isDirect)
        load(currentInstruction->as<OpPutByValDirect>());
    else
        load(currentInstruction->as<OpPutByVal>());

    ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;

    linkAllSlowCases(iter);
    Label slowPath = label();

    // The register selection below is chosen to reduce register swapping on ARM.
    // Swapping shouldn't happen on other platforms.
    emitLoad(base, regT2, regT1);
    emitLoad(property, regT3, regT0);
    emitLoad(value, regT5, regT4);
    Call call = callOperation(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize, JSValueRegs(regT2, regT1), JSValueRegs(regT3, regT0), JSValueRegs(regT5, regT4), byValInfo);

    // Record the generic slow-path entry and the call to repatch when the
    // ByVal IC regenerates a stub for this site.
    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;
}
492 | |
void JIT::emit_op_try_get_by_id(const Instruction* currentInstruction)
{
    // try_get_by_id fast path: a get_by_id-style structure IC generated with
    // AccessType::TryGet.
    auto bytecode = currentInstruction->as<OpTryGetById>();
    int dst = bytecode.m_dst.offset();
    int base = bytecode.m_base.offset();
    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));

    emitLoad(base, regT1, regT0);
    emitJumpSlowCaseIfNotJSCell(base, regT1);

    // Base payload in regT0; the result lands in (regT1, regT0).
    JITGetByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
        ident->impl(), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), AccessType::TryGet);
    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());
    m_getByIds.append(gen);

    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    emitStore(dst, regT1, regT0);
}
513 | |
514 | void JIT::emitSlow_op_try_get_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
515 | { |
516 | linkAllSlowCases(iter); |
517 | |
518 | auto bytecode = currentInstruction->as<OpTryGetById>(); |
519 | int resultVReg = bytecode.m_dst.offset(); |
520 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
521 | |
522 | JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++]; |
523 | |
524 | Label coldPathBegin = label(); |
525 | |
526 | Call call = callOperation(operationTryGetByIdOptimize, resultVReg, gen.stubInfo(), JSValueRegs(regT1, regT0), ident->impl()); |
527 | |
528 | gen.reportSlowPathCall(coldPathBegin, call); |
529 | } |
530 | |
531 | |
void JIT::emit_op_get_by_id_direct(const Instruction* currentInstruction)
{
    // get_by_id_direct fast path: a get_by_id-style structure IC generated
    // with AccessType::GetDirect.
    auto bytecode = currentInstruction->as<OpGetByIdDirect>();
    int dst = bytecode.m_dst.offset();
    int base = bytecode.m_base.offset();
    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));

    emitLoad(base, regT1, regT0);
    emitJumpSlowCaseIfNotJSCell(base, regT1);

    // Base payload in regT0; the result lands in (regT1, regT0).
    JITGetByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
        ident->impl(), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), AccessType::GetDirect);
    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());
    m_getByIds.append(gen);

    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    emitStore(dst, regT1, regT0);
}
552 | |
553 | void JIT::emitSlow_op_get_by_id_direct(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
554 | { |
555 | linkAllSlowCases(iter); |
556 | |
557 | auto bytecode = currentInstruction->as<OpGetByIdDirect>(); |
558 | int resultVReg = bytecode.m_dst.offset(); |
559 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
560 | |
561 | JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++]; |
562 | |
563 | Label coldPathBegin = label(); |
564 | |
565 | Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdDirectOptimize, resultVReg, gen.stubInfo(), JSValueRegs(regT1, regT0), ident->impl()); |
566 | |
567 | gen.reportSlowPathCall(coldPathBegin, call); |
568 | } |
569 | |
570 | |
void JIT::emit_op_get_by_id(const Instruction* currentInstruction)
{
    // get_by_id fast path: inline cache keyed on the base cell.
    auto bytecode = currentInstruction->as<OpGetById>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int dst = bytecode.m_dst.offset();
    int base = bytecode.m_base.offset();
    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));

    emitLoad(base, regT1, regT0);
    emitJumpSlowCaseIfNotJSCell(base, regT1);

    // For 'length' accesses, feed the base cell into the array profile, but
    // only when the metadata says this site is in ArrayLength mode.
    if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) {
        Jump notArrayLengthMode = branch8(NotEqual, AbsoluteAddress(&metadata.m_modeMetadata.mode), TrustedImm32(static_cast<uint8_t>(GetByIdMode::ArrayLength)));
        emitArrayProfilingSiteWithCell(regT0, regT2, &metadata.m_modeMetadata.arrayLengthMode.arrayProfile);
        notArrayLengthMode.link(this);
    }

    // Base payload in regT0; the result lands in (regT1, regT0).
    JITGetByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
        ident->impl(), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), AccessType::Get);
    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());
    m_getByIds.append(gen);

    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    emitStore(dst, regT1, regT0);
}
598 | |
599 | void JIT::emitSlow_op_get_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
600 | { |
601 | linkAllSlowCases(iter); |
602 | |
603 | auto bytecode = currentInstruction->as<OpGetById>(); |
604 | int resultVReg = bytecode.m_dst.offset(); |
605 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
606 | |
607 | JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++]; |
608 | |
609 | Label coldPathBegin = label(); |
610 | |
611 | Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdOptimize, resultVReg, gen.stubInfo(), JSValueRegs(regT1, regT0), ident->impl()); |
612 | |
613 | gen.reportSlowPathCall(coldPathBegin, call); |
614 | } |
615 | |
void JIT::emit_op_get_by_id_with_this(const Instruction* currentInstruction)
{
    // get_by_id_with_this fast path: both the base and the this value must
    // be cells for the IC to apply.
    auto bytecode = currentInstruction->as<OpGetByIdWithThis>();
    int dst = bytecode.m_dst.offset();
    int base = bytecode.m_base.offset();
    int thisVReg = bytecode.m_thisValue.offset();
    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));

    // base -> (regT1, regT0), thisValue -> (regT4, regT3).
    emitLoad(base, regT1, regT0);
    emitLoad(thisVReg, regT4, regT3);
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    emitJumpSlowCaseIfNotJSCell(thisVReg, regT4);

    JITGetByIdWithThisGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
        ident->impl(), JSValueRegs(regT1, regT0), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT4, regT3), AccessType::GetWithThis);
    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());
    m_getByIdsWithThis.append(gen);

    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    emitStore(dst, regT1, regT0);
}
639 | |
640 | void JIT::emitSlow_op_get_by_id_with_this(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
641 | { |
642 | linkAllSlowCases(iter); |
643 | |
644 | auto bytecode = currentInstruction->as<OpGetByIdWithThis>(); |
645 | int resultVReg = bytecode.m_dst.offset(); |
646 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
647 | |
648 | JITGetByIdWithThisGenerator& gen = m_getByIdsWithThis[m_getByIdWithThisIndex++]; |
649 | |
650 | Label coldPathBegin = label(); |
651 | |
652 | Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdWithThisOptimize, resultVReg, gen.stubInfo(), JSValueRegs(regT1, regT0), JSValueRegs(regT4, regT3), ident->impl()); |
653 | |
654 | gen.reportSlowPathCall(coldPathBegin, call); |
655 | } |
656 | |
void JIT::emit_op_put_by_id(const Instruction* currentInstruction)
{
    // In order to be able to patch both the Structure, and the object offset, we store one pointer,
    // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
    // such that the Structure & offset are always at the same distance from this.

    auto bytecode = currentInstruction->as<OpPutById>();
    int base = bytecode.m_base.offset();
    int value = bytecode.m_value.offset();
    bool direct = !!(bytecode.m_flags & PutByIdIsDirect);

    // base -> (regT1, regT0), value -> (regT3, regT2).
    emitLoad2(base, regT1, regT0, value, regT3, regT2);

    emitJumpSlowCaseIfNotJSCell(base, regT1);

    // Base payload in regT0, value in (regT3, regT2), regT1 is scratch.
    JITPutByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
        JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2),
        regT1, m_codeBlock->ecmaMode(), direct ? Direct : NotDirect);

    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());

    emitWriteBarrier(base, value, ShouldFilterBase);

    m_putByIds.append(gen);
}
684 | |
void JIT::emitSlow_op_put_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Generic fallback for op_put_by_id: call the generator's slow-path
    // operation and record the call for IC repatching.
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpPutById>();
    int base = bytecode.m_base.offset();
    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));

    Label coldPathBegin(this);

    // JITPutByIdGenerator only preserve the value and the base's payload, we have to reload the tag.
    emitLoadTag(base, regT1);

    JITPutByIdGenerator& gen = m_putByIds[m_putByIdIndex++];

    Call call = callOperation(
        gen.slowPathFunction(), gen.stubInfo(), JSValueRegs(regT3, regT2), JSValueRegs(regT1, regT0), ident->impl());

    gen.reportSlowPathCall(coldPathBegin, call);
}
705 | |
void JIT::emit_op_in_by_id(const Instruction* currentInstruction)
{
    // in_by_id fast path: an inline cache answering 'property in base' for a
    // cell base. Note: no value profiling site, unlike the get_by_id family.
    auto bytecode = currentInstruction->as<OpInById>();
    int dst = bytecode.m_dst.offset();
    int base = bytecode.m_base.offset();
    const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));

    emitLoad(base, regT1, regT0);
    emitJumpSlowCaseIfNotJSCell(base, regT1);

    // Base payload in regT0; the result lands in (regT1, regT0).
    JITInByIdGenerator gen(
        m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
        ident->impl(), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0));
    gen.generateFastPath(*this);
    addSlowCase(gen.slowPathJump());
    m_inByIds.append(gen);

    emitStore(dst, regT1, regT0);
}
725 | |
726 | void JIT::emitSlow_op_in_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
727 | { |
728 | linkAllSlowCases(iter); |
729 | |
730 | auto bytecode = currentInstruction->as<OpInById>(); |
731 | int resultVReg = bytecode.m_dst.offset(); |
732 | const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property)); |
733 | |
734 | JITInByIdGenerator& gen = m_inByIds[m_inByIdIndex++]; |
735 | |
736 | Label coldPathBegin = label(); |
737 | |
738 | Call call = callOperation(operationInByIdOptimize, resultVReg, gen.stubInfo(), JSValueRegs(regT1, regT0), ident->impl()); |
739 | |
740 | gen.reportSlowPathCall(coldPathBegin, call); |
741 | } |
742 | |
743 | void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks) |
744 | { |
745 | if (!needsVarInjectionChecks) |
746 | return; |
747 | addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated))); |
748 | } |
749 | |
750 | void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth) |
751 | { |
752 | emitVarInjectionCheck(needsVarInjectionChecks); |
753 | move(TrustedImm32(JSValue::CellTag), regT1); |
754 | emitLoadPayload(scope, regT0); |
755 | for (unsigned i = 0; i < depth; ++i) |
756 | loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0); |
757 | emitStore(dst, regT1, regT0); |
758 | } |
759 | |
// Emits op_resolve_scope: produces, in dst, the scope object that a following
// get_from_scope/put_to_scope will operate on, specialized on the ResolveType
// recorded in the metadata.
void JIT::emit_op_resolve_scope(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpResolveScope>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int dst = bytecode.m_dst.offset();
    int scope = bytecode.m_scope.offset();
    ResolveType resolveType = metadata.m_resolveType;
    unsigned depth = metadata.m_localScopeDepth;

    // Emits the fast path for one concrete ResolveType; unsupported/dynamic
    // cases jump straight to the slow path.
    auto emitCode = [&] (ResolveType resolveType) {
        switch (resolveType) {
        case GlobalProperty:
        case GlobalPropertyWithVarInjectionChecks: {
            JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
            RELEASE_ASSERT(constantScope);
            emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
            // Guard on the global lexical binding epoch: if the global object's
            // epoch no longer matches the one recorded in metadata, take the
            // slow path instead of using the cached scope.
            load32(&metadata.m_globalLexicalBindingEpoch, regT1);
            addSlowCase(branch32(NotEqual, AbsoluteAddress(m_codeBlock->globalObject()->addressOfGlobalLexicalBindingEpoch()), regT1));
            move(TrustedImm32(JSValue::CellTag), regT1);
            move(TrustedImmPtr(constantScope), regT0);
            emitStore(dst, regT1, regT0);
            break;
        }

        case GlobalVar:
        case GlobalVarWithVarInjectionChecks:
        case GlobalLexicalVar:
        case GlobalLexicalVarWithVarInjectionChecks: {
            // The scope is a compile-time constant; only the var-injection
            // check (if any) can send us to the slow path.
            JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
            RELEASE_ASSERT(constantScope);
            emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
            move(TrustedImm32(JSValue::CellTag), regT1);
            move(TrustedImmPtr(constantScope), regT0);
            emitStore(dst, regT1, regT0);
            break;
        }
        case ClosureVar:
        case ClosureVarWithVarInjectionChecks:
            emitResolveClosure(dst, scope, needsVarInjectionChecks(resolveType), depth);
            break;
        case ModuleVar:
            // The module's lexical environment is a known constant cell.
            move(TrustedImm32(JSValue::CellTag), regT1);
            move(TrustedImmPtr(metadata.m_lexicalEnvironment.get()), regT0);
            emitStore(dst, regT1, regT0);
            break;
        case Dynamic:
            addSlowCase(jump());
            break;
        case LocalClosureVar:
        case UnresolvedProperty:
        case UnresolvedPropertyWithVarInjectionChecks:
            RELEASE_ASSERT_NOT_REACHED();
        }
    };
    // The ResolveType stored in metadata can differ at runtime from the one
    // seen at compile time, so the polymorphic cases below re-check it and
    // dispatch to the matching fast path.
    switch (resolveType) {
    case GlobalProperty:
    case GlobalPropertyWithVarInjectionChecks: {
        JumpList skipToEnd;
        load32(&metadata.m_resolveType, regT0);

        // Either still a global property, or it became a global lexical var
        // (with matching var-injection flavor); anything else is unreachable
        // from this starting type.
        Jump notGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(resolveType));
        emitCode(resolveType);
        skipToEnd.append(jump());

        notGlobalProperty.link(this);
        emitCode(needsVarInjectionChecks(resolveType) ? GlobalLexicalVarWithVarInjectionChecks : GlobalLexicalVar);

        skipToEnd.link(this);
        break;
    }
    case UnresolvedProperty:
    case UnresolvedPropertyWithVarInjectionChecks: {
        // Fully polymorphic: test each possible runtime ResolveType in turn;
        // no match means slow path.
        JumpList skipToEnd;
        load32(&metadata.m_resolveType, regT0);

        Jump notGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(GlobalProperty));
        emitCode(GlobalProperty);
        skipToEnd.append(jump());
        notGlobalProperty.link(this);

        Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
        emitCode(GlobalPropertyWithVarInjectionChecks);
        skipToEnd.append(jump());
        notGlobalPropertyWithVarInjections.link(this);

        Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
        emitCode(GlobalLexicalVar);
        skipToEnd.append(jump());
        notGlobalLexicalVar.link(this);

        Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
        emitCode(GlobalLexicalVarWithVarInjectionChecks);
        skipToEnd.append(jump());
        notGlobalLexicalVarWithVarInjections.link(this);

        addSlowCase(jump());
        skipToEnd.link(this);
        break;
    }

    default:
        // Monomorphic: emit the single fast path for the recorded type.
        emitCode(resolveType);
        break;
    }
}
865 | |
// Load the scope JSValue into (regT1, regT0) and guard that its cell's
// structure matches the one cached in *structureSlot, bailing to the slow
// path on mismatch. Callers rely on this structure check to also cover var
// injection. Clobbers regT2 with the cached structure pointer.
void JIT::emitLoadWithStructureCheck(int scope, Structure** structureSlot)
{
    emitLoad(scope, regT1, regT0);
    loadPtr(structureSlot, regT2);
    addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), regT2));
}
872 | |
873 | void JIT::emitGetVarFromPointer(JSValue* operand, GPRReg tag, GPRReg payload) |
874 | { |
875 | uintptr_t rawAddress = bitwise_cast<uintptr_t>(operand); |
876 | load32(bitwise_cast<void*>(rawAddress + TagOffset), tag); |
877 | load32(bitwise_cast<void*>(rawAddress + PayloadOffset), payload); |
878 | } |
// Load a JSValue through an indirect slot: *operand holds the address of the
// variable. The payload register doubles as the address scratch, so the tag
// half is loaded first — the final payload load clobbers the base address.
void JIT::emitGetVarFromIndirectPointer(JSValue** operand, GPRReg tag, GPRReg payload)
{
    loadPtr(operand, payload);
    load32(Address(payload, TagOffset), tag);
    load32(Address(payload, PayloadOffset), payload);
}
885 | |
886 | void JIT::emitGetClosureVar(int scope, uintptr_t operand) |
887 | { |
888 | emitLoad(scope, regT1, regT0); |
889 | load32(Address(regT0, JSLexicalEnvironment::offsetOfVariables() + operand * sizeof(Register) + TagOffset), regT1); |
890 | load32(Address(regT0, JSLexicalEnvironment::offsetOfVariables() + operand * sizeof(Register) + PayloadOffset), regT0); |
891 | } |
892 | |
// Emits op_get_from_scope: reads a variable from the scope produced by
// op_resolve_scope, specialized on the ResolveType. The loaded value ends up
// in (regT1, regT0), is value-profiled, and stored to dst.
void JIT::emit_op_get_from_scope(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpGetFromScope>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int dst = bytecode.m_dst.offset();
    int scope = bytecode.m_scope.offset();
    ResolveType resolveType = metadata.m_getPutInfo.resolveType();
    Structure** structureSlot = metadata.m_structure.slot();
    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&metadata.m_operand);

    // Emits the load for one concrete ResolveType. indirectLoadForOperand
    // selects whether the operand slot holds a pointer to the variable
    // (polymorphic cases) or the variable's address/offset directly.
    auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
        switch (resolveType) {
        case GlobalProperty:
        case GlobalPropertyWithVarInjectionChecks: {
            emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
            GPRReg base = regT2;
            GPRReg resultTag = regT1;
            GPRReg resultPayload = regT0;
            GPRReg offset = regT3;

            move(regT0, base);
            load32(operandSlot, offset);
            if (!ASSERT_DISABLED) {
                // This fast path only handles out-of-line (butterfly) offsets.
                Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
                abortWithReason(JITOffsetIsNotOutOfLine);
                isOutOfLine.link(this);
            }
            loadPtr(Address(base, JSObject::butterflyOffset()), base);
            // Out-of-line properties live at negative indices from the
            // butterfly, hence the negated offset and rebiased base constant.
            neg32(offset);
            load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultPayload);
            load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultTag);
            break;
        }
        case GlobalVar:
        case GlobalVarWithVarInjectionChecks:
        case GlobalLexicalVar:
        case GlobalLexicalVarWithVarInjectionChecks:
            emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
            if (indirectLoadForOperand)
                emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT1, regT0);
            else
                emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT1, regT0);
            if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) // TDZ check.
                addSlowCase(branchIfEmpty(regT1));
            break;
        case ClosureVar:
        case ClosureVarWithVarInjectionChecks:
            emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
            emitGetClosureVar(scope, *operandSlot);
            break;
        case Dynamic:
            addSlowCase(jump());
            break;
        case ModuleVar:
        case LocalClosureVar:
        case UnresolvedProperty:
        case UnresolvedPropertyWithVarInjectionChecks:
            RELEASE_ASSERT_NOT_REACHED();
        }
    };

    // As in emit_op_resolve_scope, the ResolveType in metadata may have been
    // updated at runtime, so the polymorphic cases re-check it here.
    switch (resolveType) {
    case GlobalProperty:
    case GlobalPropertyWithVarInjectionChecks: {
        JumpList skipToEnd;
        load32(&metadata.m_getPutInfo, regT0);
        and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0

        Jump isNotGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(resolveType));
        emitCode(resolveType, false);
        skipToEnd.append(jump());

        isNotGlobalProperty.link(this);
        emitCode(needsVarInjectionChecks(resolveType) ? GlobalLexicalVarWithVarInjectionChecks : GlobalLexicalVar, true);
        skipToEnd.link(this);
        break;
    }
    case UnresolvedProperty:
    case UnresolvedPropertyWithVarInjectionChecks: {
        JumpList skipToEnd;
        load32(&metadata.m_getPutInfo, regT0);
        and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0

        // Both GlobalProperty flavors share one emitted path here; the
        // structure check it performs covers the var-injection variant.
        Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
        Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
        isGlobalProperty.link(this);
        emitCode(GlobalProperty, false);
        skipToEnd.append(jump());
        notGlobalPropertyWithVarInjections.link(this);

        Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
        emitCode(GlobalLexicalVar, true);
        skipToEnd.append(jump());
        notGlobalLexicalVar.link(this);

        Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
        emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
        skipToEnd.append(jump());
        notGlobalLexicalVarWithVarInjections.link(this);

        addSlowCase(jump());

        skipToEnd.link(this);
        break;
    }

    default:
        emitCode(resolveType, false);
        break;
    }
    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    emitStore(dst, regT1, regT0);
}
1006 | |
// Slow path for op_get_from_scope: links all fast-path bailouts and calls the
// generic C++ operation, profiling the value it returns.
void JIT::emitSlow_op_get_from_scope(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = currentInstruction->as<OpGetFromScope>();
    int dst = bytecode.m_dst.offset();
    callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetFromScope, dst, currentInstruction);
}
1015 | |
1016 | void JIT::emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet* set) |
1017 | { |
1018 | emitLoad(value, regT1, regT0); |
1019 | emitNotifyWrite(set); |
1020 | uintptr_t rawAddress = bitwise_cast<uintptr_t>(operand); |
1021 | store32(regT1, bitwise_cast<void*>(rawAddress + TagOffset)); |
1022 | store32(regT0, bitwise_cast<void*>(rawAddress + PayloadOffset)); |
1023 | } |
1024 | |
// Store `value` through doubly-indirect slots: both the watchpoint set and
// the variable's address are loaded from pointers at JIT time. regT2 is
// reused — first as the watchpoint-set pointer (consumed by emitNotifyWrite),
// then as the destination address for the store.
void JIT::emitPutGlobalVariableIndirect(JSValue** addressOfOperand, int value, WatchpointSet** indirectWatchpointSet)
{
    emitLoad(value, regT1, regT0);
    loadPtr(indirectWatchpointSet, regT2);
    emitNotifyWrite(regT2);
    loadPtr(addressOfOperand, regT2);
    store32(regT1, Address(regT2, TagOffset));
    store32(regT0, Address(regT2, PayloadOffset));
}
1034 | |
1035 | void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet* set) |
1036 | { |
1037 | emitLoad(value, regT3, regT2); |
1038 | emitLoad(scope, regT1, regT0); |
1039 | emitNotifyWrite(set); |
1040 | store32(regT3, Address(regT0, JSLexicalEnvironment::offsetOfVariables() + operand * sizeof(Register) + TagOffset)); |
1041 | store32(regT2, Address(regT0, JSLexicalEnvironment::offsetOfVariables() + operand * sizeof(Register) + PayloadOffset)); |
1042 | } |
1043 | |
// Emits op_put_to_scope: writes a value into the scope produced by
// op_resolve_scope, specialized on the ResolveType, with write barriers and
// watchpoint notification where required.
void JIT::emit_op_put_to_scope(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpPutToScope>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int scope = bytecode.m_scope.offset();
    int value = bytecode.m_value.offset();
    GetPutInfo getPutInfo = copiedGetPutInfo(bytecode);
    ResolveType resolveType = getPutInfo.resolveType();
    Structure** structureSlot = metadata.m_structure.slot();
    uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&metadata.m_operand);

    // Emits the store for one concrete ResolveType; indirectLoadForOperand
    // selects pointer-to-variable vs. direct-address operand slots.
    auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
        switch (resolveType) {
        case GlobalProperty:
        case GlobalPropertyWithVarInjectionChecks: {
            emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
            emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
            emitLoad(value, regT3, regT2);

            // Store through the butterfly at the cached (negated, out-of-line)
            // property offset.
            loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
            loadPtr(operandSlot, regT1);
            negPtr(regT1);
            store32(regT3, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
            store32(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
            break;
        }
        case GlobalVar:
        case GlobalVarWithVarInjectionChecks:
        case GlobalLexicalVar:
        case GlobalLexicalVarWithVarInjectionChecks: {
            JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
            RELEASE_ASSERT(constantScope);
            emitWriteBarrier(constantScope, value, ShouldFilterValue);
            emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
            if (!isInitialization(getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) {
                // We need to do a TDZ check here because we can't always prove we need to emit TDZ checks statically.
                if (indirectLoadForOperand)
                    emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT1, regT0);
                else
                    emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT1, regT0);
                addSlowCase(branchIfEmpty(regT1)); // still in the dead zone -> slow path throws
            }
            if (indirectLoadForOperand)
                emitPutGlobalVariableIndirect(bitwise_cast<JSValue**>(operandSlot), value, &metadata.m_watchpointSet);
            else
                emitPutGlobalVariable(bitwise_cast<JSValue*>(*operandSlot), value, metadata.m_watchpointSet);
            break;
        }
        case LocalClosureVar:
        case ClosureVar:
        case ClosureVarWithVarInjectionChecks:
            emitWriteBarrier(scope, value, ShouldFilterValue);
            emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
            emitPutClosureVar(scope, *operandSlot, value, metadata.m_watchpointSet);
            break;
        case ModuleVar:
        case Dynamic:
            // ModuleVar puts always fail (read-only binding) and Dynamic puts
            // always need the runtime; both go straight to the slow path.
            addSlowCase(jump());
            break;
        case UnresolvedProperty:
        case UnresolvedPropertyWithVarInjectionChecks:
            RELEASE_ASSERT_NOT_REACHED();
        }
    };

    // The ResolveType in metadata may have been updated at runtime; the
    // polymorphic cases re-check it and dispatch accordingly.
    switch (resolveType) {
    case GlobalProperty:
    case GlobalPropertyWithVarInjectionChecks: {
        JumpList skipToEnd;
        load32(&metadata.m_getPutInfo, regT0);
        and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0

        Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(resolveType));
        Jump isGlobalLexicalVar = branch32(Equal, regT0, TrustedImm32(needsVarInjectionChecks(resolveType) ? GlobalLexicalVarWithVarInjectionChecks : GlobalLexicalVar));
        addSlowCase(jump()); // Dynamic, it can happen if we attempt to put a value to already-initialized const binding.

        isGlobalLexicalVar.link(this);
        emitCode(needsVarInjectionChecks(resolveType) ? GlobalLexicalVarWithVarInjectionChecks : GlobalLexicalVar, true);
        skipToEnd.append(jump());

        isGlobalProperty.link(this);
        emitCode(resolveType, false);
        skipToEnd.link(this);
        break;
    }
    case UnresolvedProperty:
    case UnresolvedPropertyWithVarInjectionChecks: {
        JumpList skipToEnd;
        load32(&metadata.m_getPutInfo, regT0);
        and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0

        // Both GlobalProperty flavors share one emitted path; its structure
        // check covers the var-injection variant.
        Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
        Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
        isGlobalProperty.link(this);
        emitCode(GlobalProperty, false);
        skipToEnd.append(jump());
        notGlobalPropertyWithVarInjections.link(this);

        Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
        emitCode(GlobalLexicalVar, true);
        skipToEnd.append(jump());
        notGlobalLexicalVar.link(this);

        Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
        emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
        skipToEnd.append(jump());
        notGlobalLexicalVarWithVarInjections.link(this);

        addSlowCase(jump());

        skipToEnd.link(this);
        break;
    }

    default:
        emitCode(resolveType, false);
        break;
    }
}
1163 | |
1164 | void JIT::emitSlow_op_put_to_scope(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
1165 | { |
1166 | linkAllSlowCases(iter); |
1167 | |
1168 | auto bytecode = currentInstruction->as<OpPutToScope>(); |
1169 | ResolveType resolveType = copiedGetPutInfo(bytecode).resolveType(); |
1170 | if (resolveType == ModuleVar) { |
1171 | JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_strict_mode_readonly_property_write_error); |
1172 | slowPathCall.call(); |
1173 | } else |
1174 | callOperation(operationPutToScope, currentInstruction); |
1175 | } |
1176 | |
1177 | void JIT::emit_op_get_from_arguments(const Instruction* currentInstruction) |
1178 | { |
1179 | auto bytecode = currentInstruction->as<OpGetFromArguments>(); |
1180 | int dst = bytecode.m_dst.offset(); |
1181 | int arguments = bytecode.m_arguments.offset(); |
1182 | int index = bytecode.m_index; |
1183 | |
1184 | emitLoadPayload(arguments, regT0); |
1185 | load32(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + TagOffset), regT1); |
1186 | load32(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + PayloadOffset), regT0); |
1187 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
1188 | emitStore(dst, regT1, regT0); |
1189 | } |
1190 | |
1191 | void JIT::emit_op_put_to_arguments(const Instruction* currentInstruction) |
1192 | { |
1193 | auto bytecode = currentInstruction->as<OpPutToArguments>(); |
1194 | int arguments = bytecode.m_arguments.offset(); |
1195 | int index = bytecode.m_index; |
1196 | int value = bytecode.m_value.offset(); |
1197 | |
1198 | emitWriteBarrier(arguments, value, ShouldFilterValue); |
1199 | |
1200 | emitLoadPayload(arguments, regT0); |
1201 | emitLoad(value, regT1, regT2); |
1202 | store32(regT1, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + TagOffset)); |
1203 | store32(regT2, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + PayloadOffset)); |
1204 | } |
1205 | |
1206 | } // namespace JSC |
1207 | |
1208 | #endif // USE(JSVALUE32_64) |
1209 | #endif // ENABLE(JIT) |
1210 | |