1 | /* |
2 | * Copyright (C) 2009-2019 Apple Inc. All rights reserved. |
3 | * Copyright (C) 2010 Patrick Gansterer <[email protected]> |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
8 | * 1. Redistributions of source code must retain the above copyright |
9 | * notice, this list of conditions and the following disclaimer. |
10 | * 2. Redistributions in binary form must reproduce the above copyright |
11 | * notice, this list of conditions and the following disclaimer in the |
12 | * documentation and/or other materials provided with the distribution. |
13 | * |
14 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
15 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
17 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
18 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
19 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
20 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
21 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
22 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
24 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | */ |
26 | |
27 | #include "config.h" |
28 | |
29 | #if ENABLE(JIT) |
30 | #if USE(JSVALUE32_64) |
31 | #include "JIT.h" |
32 | |
33 | #include "BytecodeStructs.h" |
34 | #include "CCallHelpers.h" |
35 | #include "Exception.h" |
36 | #include "JITInlines.h" |
37 | #include "JSArray.h" |
38 | #include "JSCast.h" |
39 | #include "JSFunction.h" |
40 | #include "JSPropertyNameEnumerator.h" |
41 | #include "LinkBuffer.h" |
42 | #include "MaxFrameExtentForSlowPathCall.h" |
43 | #include "OpcodeInlines.h" |
44 | #include "SlowPathCall.h" |
45 | #include "TypeProfilerLog.h" |
46 | #include "VirtualRegister.h" |
47 | |
48 | namespace JSC { |
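
// In the JSVALUE32_64 encoding a JSValue is a 32-bit tag / 32-bit payload pair, so the opcodes
// below load operands into two GPRs at a time. The convention throughout this file is
// emitLoad(src, tagRegister, payloadRegister), most often with the tag in regT1 and the payload
// in regT0; doubles are identified by tag values below JSValue::LowestTag.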
49 | |
50 | void JIT::emit_op_mov(const Instruction* currentInstruction) |
51 | { |
52 | auto bytecode = currentInstruction->as<OpMov>(); |
53 | int dst = bytecode.m_dst.offset(); |
54 | int src = bytecode.m_src.offset(); |
55 | |
56 | if (m_codeBlock->isConstantRegisterIndex(src)) |
57 | emitStore(dst, getConstantOperand(src)); |
58 | else { |
59 | emitLoad(src, regT1, regT0); |
60 | emitStore(dst, regT1, regT0); |
61 | } |
62 | } |
63 | |
64 | void JIT::emit_op_end(const Instruction* currentInstruction) |
65 | { |
66 | ASSERT(returnValueGPR != callFrameRegister); |
67 | auto bytecode = currentInstruction->as<OpEnd>(); |
68 | emitLoad(bytecode.m_value.offset(), regT1, returnValueGPR); |
69 | emitRestoreCalleeSaves(); |
70 | emitFunctionEpilogue(); |
71 | ret(); |
72 | } |
73 | |
74 | void JIT::emit_op_jmp(const Instruction* currentInstruction) |
75 | { |
76 | auto bytecode = currentInstruction->as<OpJmp>(); |
77 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
78 | addJump(jump(), target); |
79 | } |
80 | |
81 | void JIT::emit_op_new_object(const Instruction* currentInstruction) |
82 | { |
83 | auto bytecode = currentInstruction->as<OpNewObject>(); |
84 | auto& metadata = bytecode.metadata(m_codeBlock); |
85 | Structure* structure = metadata.m_objectAllocationProfile.structure(); |
86 | size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity()); |
87 | Allocator allocator = allocatorForNonVirtualConcurrently<JSFinalObject>(*m_vm, allocationSize, AllocatorForMode::AllocatorIfExists); |
88 | |
89 | RegisterID resultReg = returnValueGPR; |
90 | RegisterID allocatorReg = regT1; |
91 | RegisterID scratchReg = regT3; |
92 | |
93 | if (!allocator) |
94 | addSlowCase(jump()); |
95 | else { |
96 | JumpList slowCases; |
97 | auto butterfly = TrustedImmPtr(nullptr); |
98 | emitAllocateJSObject(resultReg, JITAllocator::constant(allocator), allocatorReg, TrustedImmPtr(structure), butterfly, scratchReg, slowCases); |
99 | emitInitializeInlineStorage(resultReg, structure->inlineCapacity()); |
100 | addSlowCase(slowCases); |
101 | emitStoreCell(bytecode.m_dst.offset(), resultReg); |
102 | } |
103 | } |
104 | |
105 | void JIT::emitSlow_op_new_object(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
106 | { |
107 | linkAllSlowCases(iter); |
108 | |
109 | auto bytecode = currentInstruction->as<OpNewObject>(); |
110 | auto& metadata = bytecode.metadata(m_codeBlock); |
111 | int dst = bytecode.m_dst.offset(); |
112 | Structure* structure = metadata.m_objectAllocationProfile.structure(); |
113 | callOperation(operationNewObject, TrustedImmPtr(&vm()), structure); |
114 | emitStoreCell(dst, returnValueGPR); |
115 | } |
116 | |
117 | void JIT::emit_op_overrides_has_instance(const Instruction* currentInstruction) |
118 | { |
119 | auto bytecode = currentInstruction->as<OpOverridesHasInstance>(); |
120 | int dst = bytecode.m_dst.offset(); |
121 | int constructor = bytecode.m_constructor.offset(); |
122 | int hasInstanceValue = bytecode.m_hasInstanceValue.offset(); |
123 | |
124 | emitLoadPayload(hasInstanceValue, regT0); |
125 | // We don't jump if we know what Symbol.hasInstance would do. |
126 | Jump hasInstanceValueNotCell = emitJumpIfNotJSCell(hasInstanceValue); |
127 | Jump customhasInstanceValue = branchPtr(NotEqual, regT0, TrustedImmPtr(m_codeBlock->globalObject()->functionProtoHasInstanceSymbolFunction())); |
128 | |
129 | // We know that constructor is an object from the way bytecode is emitted for instanceof expressions. |
130 | emitLoadPayload(constructor, regT0); |
131 | |
    // Check that the constructor has the 'ImplementsDefaultHasInstance' flag, i.e. the object is not a C-API user nor a bound function.
133 | test8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance), regT0); |
134 | Jump done = jump(); |
135 | |
136 | hasInstanceValueNotCell.link(this); |
137 | customhasInstanceValue.link(this); |
138 | move(TrustedImm32(1), regT0); |
139 | |
140 | done.link(this); |
141 | emitStoreBool(dst, regT0); |
142 | |
143 | } |
144 | |
145 | void JIT::emit_op_instanceof(const Instruction* currentInstruction) |
146 | { |
147 | auto bytecode = currentInstruction->as<OpInstanceof>(); |
148 | int dst = bytecode.m_dst.offset(); |
149 | int value = bytecode.m_value.offset(); |
150 | int proto = bytecode.m_prototype.offset(); |
151 | |
    // Load the operands into registers.
    // regT0 is reserved for the result, so the value payload goes in regT2 and proto in regT1.
154 | emitLoadPayload(value, regT2); |
155 | emitLoadPayload(proto, regT1); |
156 | |
    // Check that value and proto are cells. baseVal must be a cell - this is checked by the get_by_id for Symbol.hasInstance.
158 | emitJumpSlowCaseIfNotJSCell(value); |
159 | emitJumpSlowCaseIfNotJSCell(proto); |
160 | |
161 | JITInstanceOfGenerator gen( |
162 | m_codeBlock, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), |
163 | RegisterSet::stubUnavailableRegisters(), |
164 | regT0, // result |
165 | regT2, // value |
166 | regT1, // proto |
167 | regT3, regT4); // scratch |
168 | gen.generateFastPath(*this); |
169 | m_instanceOfs.append(gen); |
170 | |
171 | emitStoreBool(dst, regT0); |
172 | } |
173 | |
174 | void JIT::emit_op_instanceof_custom(const Instruction*) |
175 | { |
176 | // This always goes to slow path since we expect it to be rare. |
177 | addSlowCase(jump()); |
178 | } |
179 | |
180 | void JIT::emitSlow_op_instanceof(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
181 | { |
182 | linkAllSlowCases(iter); |
183 | |
184 | auto bytecode = currentInstruction->as<OpInstanceof>(); |
185 | int dst = bytecode.m_dst.offset(); |
186 | int value = bytecode.m_value.offset(); |
187 | int proto = bytecode.m_prototype.offset(); |
188 | |
189 | JITInstanceOfGenerator& gen = m_instanceOfs[m_instanceOfIndex++]; |
190 | |
191 | Label coldPathBegin = label(); |
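    // Only the tags need to be rematerialized here; the payloads of value and proto are still
    // live in regT2 and regT1 from the fast path.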
192 | emitLoadTag(value, regT0); |
193 | emitLoadTag(proto, regT3); |
194 | Call call = callOperation(operationInstanceOfOptimize, dst, m_codeBlock->globalObject(), gen.stubInfo(), JSValueRegs(regT0, regT2), JSValueRegs(regT3, regT1)); |
195 | gen.reportSlowPathCall(coldPathBegin, call); |
196 | } |
197 | |
198 | void JIT::emitSlow_op_instanceof_custom(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
199 | { |
200 | linkAllSlowCases(iter); |
201 | |
202 | auto bytecode = currentInstruction->as<OpInstanceofCustom>(); |
203 | int dst = bytecode.m_dst.offset(); |
204 | int value = bytecode.m_value.offset(); |
205 | int constructor = bytecode.m_constructor.offset(); |
206 | int hasInstanceValue = bytecode.m_hasInstanceValue.offset(); |
207 | |
208 | emitLoad(value, regT1, regT0); |
209 | emitLoadPayload(constructor, regT2); |
210 | emitLoad(hasInstanceValue, regT4, regT3); |
211 | callOperation(operationInstanceOfCustom, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), regT2, JSValueRegs(regT4, regT3)); |
212 | emitStoreBool(dst, returnValueGPR); |
213 | } |
214 | |
215 | void JIT::emit_op_is_empty(const Instruction* currentInstruction) |
216 | { |
217 | auto bytecode = currentInstruction->as<OpIsEmpty>(); |
218 | int dst = bytecode.m_dst.offset(); |
219 | int value = bytecode.m_operand.offset(); |
220 | |
221 | emitLoad(value, regT1, regT0); |
222 | compare32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag), regT0); |
223 | |
224 | emitStoreBool(dst, regT0); |
225 | } |
226 | |
227 | void JIT::emit_op_is_undefined(const Instruction* currentInstruction) |
228 | { |
229 | auto bytecode = currentInstruction->as<OpIsUndefined>(); |
230 | int dst = bytecode.m_dst.offset(); |
231 | int value = bytecode.m_operand.offset(); |
232 | |
233 | emitLoad(value, regT1, regT0); |
234 | Jump isCell = branchIfCell(regT1); |
235 | |
236 | compare32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag), regT0); |
237 | Jump done = jump(); |
238 | |
239 | isCell.link(this); |
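    // A cell only reports undefined here if it masquerades as undefined and its structure's
    // global object is the one we're compiling for.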
240 | Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); |
241 | move(TrustedImm32(0), regT0); |
242 | Jump notMasqueradesAsUndefined = jump(); |
243 | |
244 | isMasqueradesAsUndefined.link(this); |
245 | loadPtr(Address(regT0, JSCell::structureIDOffset()), regT1); |
246 | move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); |
247 | loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1); |
248 | compare32(Equal, regT0, regT1, regT0); |
249 | |
250 | notMasqueradesAsUndefined.link(this); |
251 | done.link(this); |
252 | emitStoreBool(dst, regT0); |
253 | } |
254 | |
255 | void JIT::emit_op_is_undefined_or_null(const Instruction* currentInstruction) |
256 | { |
257 | auto bytecode = currentInstruction->as<OpIsUndefinedOrNull>(); |
258 | int dst = bytecode.m_dst.offset(); |
259 | int value = bytecode.m_operand.offset(); |
260 | |
261 | emitLoadTag(value, regT0); |
    static_assert((JSValue::UndefinedTag + 1 == JSValue::NullTag) && (JSValue::NullTag & 0x1), "");
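    // UndefinedTag and NullTag differ only in the low bit (see the static_assert above), so
    // OR-ing in 1 folds both cases into NullTag and a single compare covers undefined and null.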
263 | or32(TrustedImm32(1), regT0); |
264 | compare32(Equal, regT0, TrustedImm32(JSValue::NullTag), regT0); |
265 | emitStoreBool(dst, regT0); |
266 | } |
267 | |
268 | void JIT::emit_op_is_boolean(const Instruction* currentInstruction) |
269 | { |
270 | auto bytecode = currentInstruction->as<OpIsBoolean>(); |
271 | int dst = bytecode.m_dst.offset(); |
272 | int value = bytecode.m_operand.offset(); |
273 | |
274 | emitLoadTag(value, regT0); |
275 | compare32(Equal, regT0, TrustedImm32(JSValue::BooleanTag), regT0); |
276 | emitStoreBool(dst, regT0); |
277 | } |
278 | |
279 | void JIT::emit_op_is_number(const Instruction* currentInstruction) |
280 | { |
281 | auto bytecode = currentInstruction->as<OpIsNumber>(); |
282 | int dst = bytecode.m_dst.offset(); |
283 | int value = bytecode.m_operand.offset(); |
284 | |
285 | emitLoadTag(value, regT0); |
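    // Adding 1 wraps Int32Tag (0xffffffff) to 0, while double tags (all below LowestTag) stay
    // below LowestTag + 1, so one unsigned compare recognizes both int32s and doubles.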
286 | add32(TrustedImm32(1), regT0); |
287 | compare32(Below, regT0, TrustedImm32(JSValue::LowestTag + 1), regT0); |
288 | emitStoreBool(dst, regT0); |
289 | } |
290 | |
291 | void JIT::emit_op_is_cell_with_type(const Instruction* currentInstruction) |
292 | { |
293 | auto bytecode = currentInstruction->as<OpIsCellWithType>(); |
294 | int dst = bytecode.m_dst.offset(); |
295 | int value = bytecode.m_operand.offset(); |
296 | int type = bytecode.m_type; |
297 | |
298 | emitLoad(value, regT1, regT0); |
299 | Jump isNotCell = branchIfNotCell(regT1); |
300 | |
301 | compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(type), regT0); |
302 | Jump done = jump(); |
303 | |
304 | isNotCell.link(this); |
305 | move(TrustedImm32(0), regT0); |
306 | |
307 | done.link(this); |
308 | emitStoreBool(dst, regT0); |
309 | } |
310 | |
311 | void JIT::emit_op_is_object(const Instruction* currentInstruction) |
312 | { |
313 | auto bytecode = currentInstruction->as<OpIsObject>(); |
314 | int dst = bytecode.m_dst.offset(); |
315 | int value = bytecode.m_operand.offset(); |
316 | |
317 | emitLoad(value, regT1, regT0); |
318 | Jump isNotCell = branchIfNotCell(regT1); |
319 | |
320 | compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0); |
321 | Jump done = jump(); |
322 | |
323 | isNotCell.link(this); |
324 | move(TrustedImm32(0), regT0); |
325 | |
326 | done.link(this); |
327 | emitStoreBool(dst, regT0); |
328 | } |
329 | |
330 | void JIT::emit_op_to_primitive(const Instruction* currentInstruction) |
331 | { |
332 | auto bytecode = currentInstruction->as<OpToPrimitive>(); |
333 | int dst = bytecode.m_dst.offset(); |
334 | int src = bytecode.m_src.offset(); |
335 | |
336 | emitLoad(src, regT1, regT0); |
337 | |
338 | Jump isImm = branchIfNotCell(regT1); |
339 | addSlowCase(branchIfObject(regT0)); |
340 | isImm.link(this); |
341 | |
342 | if (dst != src) |
343 | emitStore(dst, regT1, regT0); |
344 | } |
345 | |
346 | void JIT::emit_op_set_function_name(const Instruction* currentInstruction) |
347 | { |
348 | auto bytecode = currentInstruction->as<OpSetFunctionName>(); |
349 | int func = bytecode.m_function.offset(); |
350 | int name = bytecode.m_name.offset(); |
351 | emitLoadPayload(func, regT1); |
352 | emitLoad(name, regT3, regT2); |
353 | callOperation(operationSetFunctionName, m_codeBlock->globalObject(), regT1, JSValueRegs(regT3, regT2)); |
354 | } |
355 | |
356 | void JIT::emit_op_not(const Instruction* currentInstruction) |
357 | { |
358 | auto bytecode = currentInstruction->as<OpNot>(); |
359 | int dst = bytecode.m_dst.offset(); |
360 | int src = bytecode.m_operand.offset(); |
361 | |
362 | emitLoadTag(src, regT0); |
363 | |
364 | emitLoad(src, regT1, regT0); |
365 | addSlowCase(branchIfNotBoolean(regT1, InvalidGPRReg)); |
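    // Boolean payloads are 0 or 1, so flipping the low bit computes logical not.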
366 | xor32(TrustedImm32(1), regT0); |
367 | |
368 | emitStoreBool(dst, regT0, (dst == src)); |
369 | } |
370 | |
371 | void JIT::emit_op_jfalse(const Instruction* currentInstruction) |
372 | { |
373 | auto bytecode = currentInstruction->as<OpJfalse>(); |
374 | int cond = bytecode.m_condition.offset(); |
375 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
376 | |
377 | emitLoad(cond, regT1, regT0); |
378 | |
379 | JSValueRegs value(regT1, regT0); |
380 | GPRReg scratch1 = regT2; |
381 | GPRReg scratch2 = regT3; |
382 | bool shouldCheckMasqueradesAsUndefined = true; |
383 | addJump(branchIfFalsey(vm(), value, scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target); |
384 | } |
385 | |
386 | void JIT::emit_op_jtrue(const Instruction* currentInstruction) |
387 | { |
388 | auto bytecode = currentInstruction->as<OpJtrue>(); |
389 | int cond = bytecode.m_condition.offset(); |
390 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
391 | |
392 | emitLoad(cond, regT1, regT0); |
393 | bool shouldCheckMasqueradesAsUndefined = true; |
394 | JSValueRegs value(regT1, regT0); |
395 | GPRReg scratch1 = regT2; |
396 | GPRReg scratch2 = regT3; |
397 | addJump(branchIfTruthy(vm(), value, scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target); |
398 | } |
399 | |
400 | void JIT::emit_op_jeq_null(const Instruction* currentInstruction) |
401 | { |
402 | auto bytecode = currentInstruction->as<OpJeqNull>(); |
403 | int src = bytecode.m_value.offset(); |
404 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
405 | |
406 | emitLoad(src, regT1, regT0); |
407 | |
408 | Jump isImmediate = branchIfNotCell(regT1); |
409 | |
410 | Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); |
411 | loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2); |
412 | move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); |
413 | addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target); |
414 | Jump masqueradesGlobalObjectIsForeign = jump(); |
415 | |
416 | // Now handle the immediate cases - undefined & null |
417 | isImmediate.link(this); |
    static_assert((JSValue::UndefinedTag + 1 == JSValue::NullTag) && (JSValue::NullTag & 0x1), "");
419 | or32(TrustedImm32(1), regT1); |
420 | addJump(branchIfNull(regT1), target); |
421 | |
422 | isNotMasqueradesAsUndefined.link(this); |
423 | masqueradesGlobalObjectIsForeign.link(this); |
424 | } |
425 | |
426 | void JIT::emit_op_jneq_null(const Instruction* currentInstruction) |
427 | { |
428 | auto bytecode = currentInstruction->as<OpJneqNull>(); |
429 | int src = bytecode.m_value.offset(); |
430 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
431 | |
432 | emitLoad(src, regT1, regT0); |
433 | |
434 | Jump isImmediate = branchIfNotCell(regT1); |
435 | |
436 | addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target); |
437 | loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2); |
438 | move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); |
439 | addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target); |
440 | Jump wasNotImmediate = jump(); |
441 | |
442 | // Now handle the immediate cases - undefined & null |
443 | isImmediate.link(this); |
444 | |
    static_assert((JSValue::UndefinedTag + 1 == JSValue::NullTag) && (JSValue::NullTag & 0x1), "");
446 | or32(TrustedImm32(1), regT1); |
447 | addJump(branchIfNotNull(regT1), target); |
448 | |
449 | wasNotImmediate.link(this); |
450 | } |
451 | |
452 | void JIT::emit_op_jundefined_or_null(const Instruction* currentInstruction) |
453 | { |
454 | auto bytecode = currentInstruction->as<OpJundefinedOrNull>(); |
455 | int value = bytecode.m_value.offset(); |
456 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
457 | |
458 | emitLoadTag(value, regT0); |
    static_assert((JSValue::UndefinedTag + 1 == JSValue::NullTag) && (JSValue::NullTag & 0x1), "");
460 | or32(TrustedImm32(1), regT0); |
461 | addJump(branchIfNull(regT0), target); |
462 | } |
463 | |
464 | void JIT::emit_op_jnundefined_or_null(const Instruction* currentInstruction) |
465 | { |
466 | auto bytecode = currentInstruction->as<OpJnundefinedOrNull>(); |
467 | int value = bytecode.m_value.offset(); |
468 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
469 | |
470 | emitLoadTag(value, regT0); |
    static_assert((JSValue::UndefinedTag + 1 == JSValue::NullTag) && (JSValue::NullTag & 0x1), "");
472 | or32(TrustedImm32(1), regT0); |
473 | addJump(branchIfNotNull(regT0), target); |
474 | } |
475 | |
476 | void JIT::emit_op_jneq_ptr(const Instruction* currentInstruction) |
477 | { |
478 | auto bytecode = currentInstruction->as<OpJneqPtr>(); |
479 | auto& metadata = bytecode.metadata(m_codeBlock); |
480 | int src = bytecode.m_value.offset(); |
481 | JSValue specialPointer = getConstantOperand(bytecode.m_specialPointer.offset()); |
482 | ASSERT(specialPointer.isCell()); |
483 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
484 | |
485 | emitLoad(src, regT1, regT0); |
486 | Jump notCell = branchIfNotCell(regT1); |
487 | Jump equal = branchPtr(Equal, regT0, TrustedImmPtr(specialPointer.asCell())); |
488 | notCell.link(this); |
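    // Record in the metadata that the pointer check failed at least once, so upper tiers won't
    // speculate that this operand is always the special pointer.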
489 | store8(TrustedImm32(1), &metadata.m_hasJumped); |
490 | addJump(jump(), target); |
491 | equal.link(this); |
492 | } |
493 | |
494 | void JIT::emit_op_eq(const Instruction* currentInstruction) |
495 | { |
496 | auto bytecode = currentInstruction->as<OpEq>(); |
497 | |
498 | int dst = bytecode.m_dst.offset(); |
499 | int src1 = bytecode.m_lhs.offset(); |
500 | int src2 = bytecode.m_rhs.offset(); |
501 | |
502 | emitLoad2(src1, regT1, regT0, src2, regT3, regT2); |
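    // Take the slow path when the tags differ (e.g. int32 vs. double), when the operands are
    // cells (strings and objects need a deeper comparison), or when the tag is a double tag
    // (a bitwise compare is not the same as double equality, e.g. for NaN or -0).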
503 | addSlowCase(branch32(NotEqual, regT1, regT3)); |
504 | addSlowCase(branchIfCell(regT1)); |
505 | addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag))); |
506 | |
507 | compare32(Equal, regT0, regT2, regT0); |
508 | |
509 | emitStoreBool(dst, regT0); |
510 | } |
511 | |
512 | void JIT::emitSlow_op_eq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
513 | { |
514 | auto bytecode = currentInstruction->as<OpEq>(); |
515 | int dst = bytecode.m_dst.offset(); |
516 | |
517 | JumpList storeResult; |
518 | JumpList genericCase; |
519 | |
520 | genericCase.append(getSlowCase(iter)); // tags not equal |
521 | |
522 | linkSlowCase(iter); // tags equal and JSCell |
523 | genericCase.append(branchIfNotString(regT0)); |
524 | genericCase.append(branchIfNotString(regT2)); |
525 | |
526 | // String case. |
527 | callOperation(operationCompareStringEq, m_codeBlock->globalObject(), regT0, regT2); |
528 | storeResult.append(jump()); |
529 | |
530 | // Generic case. |
531 | genericCase.append(getSlowCase(iter)); // doubles |
532 | genericCase.link(this); |
533 | callOperation(operationCompareEq, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2)); |
534 | |
535 | storeResult.link(this); |
536 | emitStoreBool(dst, returnValueGPR); |
537 | } |
538 | |
539 | void JIT::emit_op_jeq(const Instruction* currentInstruction) |
540 | { |
541 | auto bytecode = currentInstruction->as<OpJeq>(); |
542 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
543 | int src1 = bytecode.m_lhs.offset(); |
544 | int src2 = bytecode.m_rhs.offset(); |
545 | |
546 | emitLoad2(src1, regT1, regT0, src2, regT3, regT2); |
547 | addSlowCase(branch32(NotEqual, regT1, regT3)); |
548 | addSlowCase(branchIfCell(regT1)); |
549 | addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag))); |
550 | |
551 | addJump(branch32(Equal, regT0, regT2), target); |
552 | } |
553 | |
554 | void JIT::compileOpEqJumpSlow(Vector<SlowCaseEntry>::iterator& iter, CompileOpEqType type, int jumpTarget) |
555 | { |
556 | JumpList done; |
557 | JumpList genericCase; |
558 | |
559 | genericCase.append(getSlowCase(iter)); // tags not equal |
560 | |
561 | linkSlowCase(iter); // tags equal and JSCell |
562 | genericCase.append(branchIfNotString(regT0)); |
563 | genericCase.append(branchIfNotString(regT2)); |
564 | |
565 | // String case. |
566 | callOperation(operationCompareStringEq, m_codeBlock->globalObject(), regT0, regT2); |
567 | emitJumpSlowToHot(branchTest32(type == CompileOpEqType::Eq ? NonZero : Zero, returnValueGPR), jumpTarget); |
568 | done.append(jump()); |
569 | |
570 | // Generic case. |
571 | genericCase.append(getSlowCase(iter)); // doubles |
572 | genericCase.link(this); |
573 | callOperation(operationCompareEq, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2)); |
574 | emitJumpSlowToHot(branchTest32(type == CompileOpEqType::Eq ? NonZero : Zero, returnValueGPR), jumpTarget); |
575 | |
576 | done.link(this); |
577 | } |
578 | |
579 | void JIT::emitSlow_op_jeq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
580 | { |
581 | auto bytecode = currentInstruction->as<OpJeq>(); |
582 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
583 | compileOpEqJumpSlow(iter, CompileOpEqType::Eq, target); |
584 | } |
585 | |
586 | void JIT::emit_op_neq(const Instruction* currentInstruction) |
587 | { |
588 | auto bytecode = currentInstruction->as<OpNeq>(); |
589 | int dst = bytecode.m_dst.offset(); |
590 | int src1 = bytecode.m_lhs.offset(); |
591 | int src2 = bytecode.m_rhs.offset(); |
592 | |
593 | emitLoad2(src1, regT1, regT0, src2, regT3, regT2); |
594 | addSlowCase(branch32(NotEqual, regT1, regT3)); |
595 | addSlowCase(branchIfCell(regT1)); |
596 | addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag))); |
597 | |
598 | compare32(NotEqual, regT0, regT2, regT0); |
599 | |
600 | emitStoreBool(dst, regT0); |
601 | } |
602 | |
603 | void JIT::emitSlow_op_neq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
604 | { |
605 | auto bytecode = currentInstruction->as<OpNeq>(); |
606 | int dst = bytecode.m_dst.offset(); |
607 | |
608 | JumpList storeResult; |
609 | JumpList genericCase; |
610 | |
611 | genericCase.append(getSlowCase(iter)); // tags not equal |
612 | |
613 | linkSlowCase(iter); // tags equal and JSCell |
614 | genericCase.append(branchIfNotString(regT0)); |
615 | genericCase.append(branchIfNotString(regT2)); |
616 | |
617 | // String case. |
618 | callOperation(operationCompareStringEq, m_codeBlock->globalObject(), regT0, regT2); |
619 | storeResult.append(jump()); |
620 | |
621 | // Generic case. |
622 | genericCase.append(getSlowCase(iter)); // doubles |
623 | genericCase.link(this); |
624 | callOperation(operationCompareEq, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2)); |
625 | |
626 | storeResult.link(this); |
627 | xor32(TrustedImm32(0x1), returnValueGPR); |
628 | emitStoreBool(dst, returnValueGPR); |
629 | } |
630 | |
631 | void JIT::emit_op_jneq(const Instruction* currentInstruction) |
632 | { |
633 | auto bytecode = currentInstruction->as<OpJneq>(); |
634 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
635 | int src1 = bytecode.m_lhs.offset(); |
636 | int src2 = bytecode.m_rhs.offset(); |
637 | |
638 | emitLoad2(src1, regT1, regT0, src2, regT3, regT2); |
639 | addSlowCase(branch32(NotEqual, regT1, regT3)); |
640 | addSlowCase(branchIfCell(regT1)); |
641 | addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag))); |
642 | |
643 | addJump(branch32(NotEqual, regT0, regT2), target); |
644 | } |
645 | |
646 | void JIT::emitSlow_op_jneq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
647 | { |
648 | auto bytecode = currentInstruction->as<OpJneq>(); |
649 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
650 | compileOpEqJumpSlow(iter, CompileOpEqType::NEq, target); |
651 | } |
652 | |
653 | template <typename Op> |
654 | void JIT::compileOpStrictEq(const Instruction* currentInstruction, CompileOpStrictEqType type) |
655 | { |
656 | auto bytecode = currentInstruction->as<Op>(); |
657 | int dst = bytecode.m_dst.offset(); |
658 | int src1 = bytecode.m_lhs.offset(); |
659 | int src2 = bytecode.m_rhs.offset(); |
660 | |
661 | emitLoad2(src1, regT1, regT0, src2, regT3, regT2); |
662 | |
663 | // Bail if the tags differ, or are double. |
664 | addSlowCase(branch32(NotEqual, regT1, regT3)); |
665 | addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag))); |
666 | |
    // Jump to a slow case if both operands are cells but neither is an object (e.g. both are strings or symbols).
668 | Jump notCell = branchIfNotCell(regT1); |
669 | Jump firstIsObject = branchIfObject(regT0); |
670 | addSlowCase(branchIfNotObject(regT2)); |
671 | notCell.link(this); |
672 | firstIsObject.link(this); |
673 | |
674 | // Simply compare the payloads. |
675 | if (type == CompileOpStrictEqType::StrictEq) |
676 | compare32(Equal, regT0, regT2, regT0); |
677 | else |
678 | compare32(NotEqual, regT0, regT2, regT0); |
679 | |
680 | emitStoreBool(dst, regT0); |
681 | } |
682 | |
683 | void JIT::emit_op_stricteq(const Instruction* currentInstruction) |
684 | { |
685 | compileOpStrictEq<OpStricteq>(currentInstruction, CompileOpStrictEqType::StrictEq); |
686 | } |
687 | |
688 | void JIT::emit_op_nstricteq(const Instruction* currentInstruction) |
689 | { |
690 | compileOpStrictEq<OpNstricteq>(currentInstruction, CompileOpStrictEqType::NStrictEq); |
691 | } |
692 | |
693 | template<typename Op> |
694 | void JIT::compileOpStrictEqJump(const Instruction* currentInstruction, CompileOpStrictEqType type) |
695 | { |
696 | auto bytecode = currentInstruction->as<Op>(); |
697 | int target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
698 | int src1 = bytecode.m_lhs.offset(); |
699 | int src2 = bytecode.m_rhs.offset(); |
700 | |
701 | emitLoad2(src1, regT1, regT0, src2, regT3, regT2); |
702 | |
703 | // Bail if the tags differ, or are double. |
704 | addSlowCase(branch32(NotEqual, regT1, regT3)); |
705 | addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag))); |
706 | |
    // Jump to a slow case if both operands are cells but neither is an object (e.g. both are strings or symbols).
708 | Jump notCell = branchIfNotCell(regT1); |
709 | Jump firstIsObject = branchIfObject(regT0); |
710 | addSlowCase(branchIfNotObject(regT2)); |
711 | notCell.link(this); |
712 | firstIsObject.link(this); |
713 | |
714 | // Simply compare the payloads. |
715 | if (type == CompileOpStrictEqType::StrictEq) |
716 | addJump(branch32(Equal, regT0, regT2), target); |
717 | else |
718 | addJump(branch32(NotEqual, regT0, regT2), target); |
719 | } |
720 | |
721 | void JIT::emit_op_jstricteq(const Instruction* currentInstruction) |
722 | { |
723 | compileOpStrictEqJump<OpJstricteq>(currentInstruction, CompileOpStrictEqType::StrictEq); |
724 | } |
725 | |
726 | void JIT::emit_op_jnstricteq(const Instruction* currentInstruction) |
727 | { |
728 | compileOpStrictEqJump<OpJnstricteq>(currentInstruction, CompileOpStrictEqType::NStrictEq); |
729 | } |
730 | |
731 | void JIT::emitSlow_op_jstricteq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
732 | { |
733 | linkAllSlowCases(iter); |
734 | |
735 | auto bytecode = currentInstruction->as<OpJstricteq>(); |
736 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
737 | callOperation(operationCompareStrictEq, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2)); |
738 | emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target); |
739 | } |
740 | |
741 | void JIT::emitSlow_op_jnstricteq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
742 | { |
743 | linkAllSlowCases(iter); |
744 | |
745 | auto bytecode = currentInstruction->as<OpJnstricteq>(); |
746 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
747 | callOperation(operationCompareStrictEq, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2)); |
748 | emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target); |
749 | } |
750 | |
751 | void JIT::emit_op_eq_null(const Instruction* currentInstruction) |
752 | { |
753 | auto bytecode = currentInstruction->as<OpEqNull>(); |
754 | int dst = bytecode.m_dst.offset(); |
755 | int src = bytecode.m_operand.offset(); |
756 | |
757 | emitLoad(src, regT1, regT0); |
758 | Jump isImmediate = branchIfNotCell(regT1); |
759 | |
760 | Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); |
761 | move(TrustedImm32(0), regT1); |
762 | Jump wasNotMasqueradesAsUndefined = jump(); |
763 | |
764 | isMasqueradesAsUndefined.link(this); |
765 | loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2); |
766 | move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); |
767 | loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2); |
768 | compare32(Equal, regT0, regT2, regT1); |
769 | Jump wasNotImmediate = jump(); |
770 | |
771 | isImmediate.link(this); |
772 | |
773 | compare32(Equal, regT1, TrustedImm32(JSValue::NullTag), regT2); |
774 | compare32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag), regT1); |
775 | or32(regT2, regT1); |
776 | |
777 | wasNotImmediate.link(this); |
778 | wasNotMasqueradesAsUndefined.link(this); |
779 | |
780 | emitStoreBool(dst, regT1); |
781 | } |
782 | |
783 | void JIT::emit_op_neq_null(const Instruction* currentInstruction) |
784 | { |
785 | auto bytecode = currentInstruction->as<OpNeqNull>(); |
786 | int dst = bytecode.m_dst.offset(); |
787 | int src = bytecode.m_operand.offset(); |
788 | |
789 | emitLoad(src, regT1, regT0); |
790 | Jump isImmediate = branchIfNotCell(regT1); |
791 | |
792 | Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); |
793 | move(TrustedImm32(1), regT1); |
794 | Jump wasNotMasqueradesAsUndefined = jump(); |
795 | |
796 | isMasqueradesAsUndefined.link(this); |
797 | loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2); |
798 | move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); |
799 | loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2); |
800 | compare32(NotEqual, regT0, regT2, regT1); |
801 | Jump wasNotImmediate = jump(); |
802 | |
803 | isImmediate.link(this); |
804 | |
805 | compare32(NotEqual, regT1, TrustedImm32(JSValue::NullTag), regT2); |
806 | compare32(NotEqual, regT1, TrustedImm32(JSValue::UndefinedTag), regT1); |
807 | and32(regT2, regT1); |
808 | |
809 | wasNotImmediate.link(this); |
810 | wasNotMasqueradesAsUndefined.link(this); |
811 | |
812 | emitStoreBool(dst, regT1); |
813 | } |
814 | |
815 | void JIT::emit_op_throw(const Instruction* currentInstruction) |
816 | { |
817 | auto bytecode = currentInstruction->as<OpThrow>(); |
818 | ASSERT(regT0 == returnValueGPR); |
819 | copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm().topEntryFrame); |
820 | emitLoad(bytecode.m_value.offset(), regT1, regT0); |
821 | callOperationNoExceptionCheck(operationThrow, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0)); |
822 | jumpToExceptionHandler(vm()); |
823 | } |
824 | |
825 | void JIT::emit_op_to_number(const Instruction* currentInstruction) |
826 | { |
827 | auto bytecode = currentInstruction->as<OpToNumber>(); |
828 | int dst = bytecode.m_dst.offset(); |
829 | int src = bytecode.m_operand.offset(); |
830 | |
831 | emitLoad(src, regT1, regT0); |
832 | |
833 | Jump isInt32 = branchIfInt32(regT1); |
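    // Anything that is neither an int32 nor a double (tag >= LowestTag) needs the full
    // ToNumber conversion on the slow path.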
834 | addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag))); |
835 | isInt32.link(this); |
836 | |
837 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
838 | if (src != dst) |
839 | emitStore(dst, regT1, regT0); |
840 | } |
841 | |
842 | void JIT::emit_op_to_numeric(const Instruction* currentInstruction) |
843 | { |
844 | auto bytecode = currentInstruction->as<OpToNumeric>(); |
845 | int dst = bytecode.m_dst.offset(); |
846 | int src = bytecode.m_operand.offset(); |
847 | JSValueRegs argumentValueRegs(regT1, regT0); |
848 | |
849 | emitLoad(src, regT1, regT0); |
850 | |
851 | Jump isNotCell = branchIfNotCell(regT1); |
852 | addSlowCase(branchIfNotBigInt(regT0)); |
853 | Jump isBigInt = jump(); |
854 | |
855 | isNotCell.link(this); |
856 | addSlowCase(branchIfNotNumber(argumentValueRegs, regT2)); |
857 | isBigInt.link(this); |
858 | |
859 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
860 | if (src != dst) |
861 | emitStore(dst, regT1, regT0); |
862 | } |
863 | |
864 | void JIT::emit_op_to_string(const Instruction* currentInstruction) |
865 | { |
866 | auto bytecode = currentInstruction->as<OpToString>(); |
867 | int dst = bytecode.m_dst.offset(); |
868 | int src = bytecode.m_operand.offset(); |
869 | |
870 | emitLoad(src, regT1, regT0); |
871 | |
872 | addSlowCase(branchIfNotCell(regT1)); |
873 | addSlowCase(branchIfNotString(regT0)); |
874 | |
875 | if (src != dst) |
876 | emitStore(dst, regT1, regT0); |
877 | } |
878 | |
879 | void JIT::emit_op_to_object(const Instruction* currentInstruction) |
880 | { |
881 | auto bytecode = currentInstruction->as<OpToObject>(); |
882 | int dst = bytecode.m_dst.offset(); |
883 | int src = bytecode.m_operand.offset(); |
884 | |
885 | emitLoad(src, regT1, regT0); |
886 | |
887 | addSlowCase(branchIfNotCell(regT1)); |
888 | addSlowCase(branchIfNotObject(regT0)); |
889 | |
890 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
891 | if (src != dst) |
892 | emitStore(dst, regT1, regT0); |
893 | } |
894 | |
895 | void JIT::emit_op_catch(const Instruction* currentInstruction) |
896 | { |
897 | auto bytecode = currentInstruction->as<OpCatch>(); |
898 | |
899 | restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(vm().topEntryFrame); |
900 | |
901 | move(TrustedImmPtr(m_vm), regT3); |
902 | // operationThrow returns the callFrame for the handler. |
903 | load32(Address(regT3, VM::callFrameForCatchOffset()), callFrameRegister); |
904 | storePtr(TrustedImmPtr(nullptr), Address(regT3, VM::callFrameForCatchOffset())); |
905 | |
906 | addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister); |
907 | |
908 | callOperationNoExceptionCheck(operationCheckIfExceptionIsUncatchableAndNotifyProfiler, TrustedImmPtr(&vm())); |
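    // A zero return value means the exception is catchable here; otherwise keep unwinding.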
909 | Jump isCatchableException = branchTest32(Zero, returnValueGPR); |
910 | jumpToExceptionHandler(vm()); |
911 | isCatchableException.link(this); |
912 | |
913 | move(TrustedImmPtr(m_vm), regT3); |
914 | |
915 | // Now store the exception returned by operationThrow. |
916 | load32(Address(regT3, VM::exceptionOffset()), regT2); |
917 | move(TrustedImm32(JSValue::CellTag), regT1); |
918 | |
919 | store32(TrustedImm32(0), Address(regT3, VM::exceptionOffset())); |
920 | |
921 | unsigned exception = bytecode.m_exception.offset(); |
922 | emitStore(exception, regT1, regT2); |
923 | |
924 | load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); |
925 | load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); |
926 | |
927 | unsigned thrownValue = bytecode.m_thrownValue.offset(); |
928 | emitStore(thrownValue, regT1, regT0); |
929 | |
930 | #if ENABLE(DFG_JIT) |
931 | // FIXME: consider inline caching the process of doing OSR entry, including |
932 | // argument type proofs, storing locals to the buffer, etc |
933 | // https://bugs.webkit.org/show_bug.cgi?id=175598 |
934 | |
935 | auto& metadata = bytecode.metadata(m_codeBlock); |
936 | ValueProfileAndOperandBuffer* buffer = metadata.m_buffer; |
937 | if (buffer || !shouldEmitProfiling()) |
938 | callOperation(operationTryOSREnterAtCatch, &vm(), m_bytecodeIndex.asBits()); |
939 | else |
940 | callOperation(operationTryOSREnterAtCatchAndValueProfile, &vm(), m_bytecodeIndex.asBits()); |
941 | auto skipOSREntry = branchTestPtr(Zero, returnValueGPR); |
942 | emitRestoreCalleeSaves(); |
943 | farJump(returnValueGPR, NoPtrTag); |
944 | skipOSREntry.link(this); |
945 | if (buffer && shouldEmitProfiling()) { |
946 | buffer->forEach([&] (ValueProfileAndOperand& profile) { |
947 | JSValueRegs regs(regT1, regT0); |
948 | emitGetVirtualRegister(profile.m_operand, regs); |
949 | emitValueProfilingSite(static_cast<ValueProfile&>(profile)); |
950 | }); |
951 | } |
952 | #endif // ENABLE(DFG_JIT) |
953 | } |
954 | |
955 | void JIT::emit_op_identity_with_profile(const Instruction*) |
956 | { |
957 | // We don't need to do anything here... |
958 | } |
959 | |
960 | void JIT::emit_op_get_parent_scope(const Instruction* currentInstruction) |
961 | { |
962 | auto bytecode = currentInstruction->as<OpGetParentScope>(); |
963 | int currentScope = bytecode.m_scope.offset(); |
964 | emitLoadPayload(currentScope, regT0); |
965 | loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0); |
966 | emitStoreCell(bytecode.m_dst.offset(), regT0); |
967 | } |
968 | |
969 | void JIT::emit_op_switch_imm(const Instruction* currentInstruction) |
970 | { |
971 | auto bytecode = currentInstruction->as<OpSwitchImm>(); |
972 | size_t tableIndex = bytecode.m_tableIndex; |
973 | unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset); |
974 | unsigned scrutinee = bytecode.m_scrutinee.offset(); |
975 | |
976 | // create jump table for switch destinations, track this switch statement. |
977 | SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex); |
978 | m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate)); |
979 | jumpTable->ensureCTITable(); |
980 | |
981 | emitLoad(scrutinee, regT1, regT0); |
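    // The key's type isn't known statically, so call out to resolve it against the jump table;
    // the operation returns the code address to jump to (the matching case or the default).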
982 | callOperation(operationSwitchImmWithUnknownKeyType, TrustedImmPtr(&vm()), JSValueRegs(regT1, regT0), tableIndex); |
983 | farJump(returnValueGPR, NoPtrTag); |
984 | } |
985 | |
986 | void JIT::emit_op_switch_char(const Instruction* currentInstruction) |
987 | { |
988 | auto bytecode = currentInstruction->as<OpSwitchChar>(); |
989 | size_t tableIndex = bytecode.m_tableIndex; |
990 | unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset); |
991 | unsigned scrutinee = bytecode.m_scrutinee.offset(); |
992 | |
993 | // create jump table for switch destinations, track this switch statement. |
994 | SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex); |
995 | m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character)); |
996 | jumpTable->ensureCTITable(); |
997 | |
998 | emitLoad(scrutinee, regT1, regT0); |
999 | callOperation(operationSwitchCharWithUnknownKeyType, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), tableIndex); |
1000 | farJump(returnValueGPR, NoPtrTag); |
1001 | } |
1002 | |
1003 | void JIT::emit_op_switch_string(const Instruction* currentInstruction) |
1004 | { |
1005 | auto bytecode = currentInstruction->as<OpSwitchString>(); |
1006 | size_t tableIndex = bytecode.m_tableIndex; |
1007 | unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset); |
1008 | unsigned scrutinee = bytecode.m_scrutinee.offset(); |
1009 | |
1010 | // create jump table for switch destinations, track this switch statement. |
1011 | StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex); |
1012 | m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset)); |
1013 | |
1014 | emitLoad(scrutinee, regT1, regT0); |
1015 | callOperation(operationSwitchStringWithUnknownKeyType, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), tableIndex); |
1016 | farJump(returnValueGPR, NoPtrTag); |
1017 | } |
1018 | |
1019 | void JIT::emit_op_debug(const Instruction* currentInstruction) |
1020 | { |
1021 | auto bytecode = currentInstruction->as<OpDebug>(); |
1022 | load32(codeBlock()->debuggerRequestsAddress(), regT0); |
1023 | Jump noDebuggerRequests = branchTest32(Zero, regT0); |
1024 | callOperation(operationDebug, &vm(), static_cast<int>(bytecode.m_debugHookType)); |
1025 | noDebuggerRequests.link(this); |
1026 | } |
1027 | |
1028 | |
1029 | void JIT::emit_op_enter(const Instruction* currentInstruction) |
1030 | { |
1031 | emitEnterOptimizationCheck(); |
1032 | |
    // Even though JIT code doesn't strictly need them initialized, we zap the locals with
    // undefined to clear stale pointers, avoiding unnecessarily prolonging object lifetime
    // and increasing GC pressure.
1036 | for (int i = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); i < m_codeBlock->numVars(); ++i) |
1037 | emitStore(virtualRegisterForLocal(i).offset(), jsUndefined()); |
1038 | |
1039 | JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_enter); |
1040 | slowPathCall.call(); |
1041 | } |
1042 | |
1043 | void JIT::emit_op_get_scope(const Instruction* currentInstruction) |
1044 | { |
1045 | auto bytecode = currentInstruction->as<OpGetScope>(); |
1046 | int dst = bytecode.m_dst.offset(); |
1047 | emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, regT0); |
1048 | loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0); |
1049 | emitStoreCell(dst, regT0); |
1050 | } |
1051 | |
1052 | void JIT::emit_op_create_this(const Instruction* currentInstruction) |
1053 | { |
1054 | auto bytecode = currentInstruction->as<OpCreateThis>(); |
1055 | auto& metadata = bytecode.metadata(m_codeBlock); |
1056 | int callee = bytecode.m_callee.offset(); |
1057 | WriteBarrierBase<JSCell>* cachedFunction = &metadata.m_cachedCallee; |
1058 | RegisterID calleeReg = regT0; |
1059 | RegisterID rareDataReg = regT4; |
1060 | RegisterID resultReg = regT0; |
1061 | RegisterID allocatorReg = regT1; |
1062 | RegisterID structureReg = regT2; |
1063 | RegisterID cachedFunctionReg = regT4; |
1064 | RegisterID scratchReg = regT3; |
1065 | |
1066 | emitLoadPayload(callee, calleeReg); |
1067 | addSlowCase(branchIfNotFunction(calleeReg)); |
1068 | loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg); |
1069 | addSlowCase(branchTestPtr(Zero, rareDataReg)); |
1070 | load32(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfileWithPrototype::offsetOfAllocator()), allocatorReg); |
1071 | loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfileWithPrototype::offsetOfStructure()), structureReg); |
1072 | |
1073 | loadPtr(cachedFunction, cachedFunctionReg); |
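    // If the profile has already seen multiple callees, skip the check; otherwise bail to the
    // slow path when the callee doesn't match the cached one, keeping the allocation profile valid.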
1074 | Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects())); |
1075 | addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg)); |
1076 | hasSeenMultipleCallees.link(this); |
1077 | |
1078 | JumpList slowCases; |
1079 | auto butterfly = TrustedImmPtr(nullptr); |
1080 | emitAllocateJSObject(resultReg, JITAllocator::variable(), allocatorReg, structureReg, butterfly, scratchReg, slowCases); |
1081 | load8(Address(structureReg, Structure::inlineCapacityOffset()), scratchReg); |
1082 | emitInitializeInlineStorage(resultReg, scratchReg); |
1083 | addSlowCase(slowCases); |
1084 | emitStoreCell(bytecode.m_dst.offset(), resultReg); |
1085 | } |
1086 | |
1087 | void JIT::emit_op_to_this(const Instruction* currentInstruction) |
1088 | { |
1089 | auto bytecode = currentInstruction->as<OpToThis>(); |
1090 | auto& metadata = bytecode.metadata(m_codeBlock); |
1091 | StructureID* cachedStructureID = &metadata.m_cachedStructureID; |
1092 | int thisRegister = bytecode.m_srcDst.offset(); |
1093 | |
1094 | emitLoad(thisRegister, regT3, regT2); |
1095 | |
1096 | addSlowCase(branchIfNotCell(regT3)); |
1097 | addSlowCase(branchIfNotType(regT2, FinalObjectType)); |
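    // Finally, require that the object's structure still matches the structure cached in the
    // metadata; otherwise fall back to the slow path.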
1098 | loadPtr(Address(regT2, JSCell::structureIDOffset()), regT0); |
1099 | load32(cachedStructureID, regT2); |
1100 | addSlowCase(branchPtr(NotEqual, regT0, regT2)); |
1101 | } |
1102 | |
1103 | void JIT::emit_op_check_tdz(const Instruction* currentInstruction) |
1104 | { |
1105 | auto bytecode = currentInstruction->as<OpCheckTdz>(); |
1106 | emitLoadTag(bytecode.m_targetVirtualRegister.offset(), regT0); |
1107 | addSlowCase(branchIfEmpty(regT0)); |
1108 | } |
1109 | |
1110 | void JIT::emit_op_has_structure_property(const Instruction* currentInstruction) |
1111 | { |
1112 | auto bytecode = currentInstruction->as<OpHasStructureProperty>(); |
1113 | int dst = bytecode.m_dst.offset(); |
1114 | int base = bytecode.m_base.offset(); |
1115 | int enumerator = bytecode.m_enumerator.offset(); |
1116 | |
1117 | emitLoadPayload(base, regT0); |
1118 | emitJumpSlowCaseIfNotJSCell(base); |
1119 | |
1120 | emitLoadPayload(enumerator, regT1); |
1121 | |
1122 | load32(Address(regT0, JSCell::structureIDOffset()), regT0); |
1123 | addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset()))); |
1124 | |
1125 | move(TrustedImm32(1), regT0); |
1126 | emitStoreBool(dst, regT0); |
1127 | } |
1128 | |
1129 | void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) |
1130 | { |
1131 | const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); |
1132 | |
1133 | PatchableJump badType; |
1134 | |
1135 | // FIXME: Add support for other types like TypedArrays and Arguments. |
1136 | // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034. |
1137 | JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType); |
1138 | move(TrustedImm32(1), regT0); |
1139 | Jump done = jump(); |
1140 | |
1141 | LinkBuffer patchBuffer(*this, m_codeBlock); |
1142 | |
1143 | patchBuffer.link(badType, byValInfo->slowPathTarget); |
1144 | patchBuffer.link(slowCases, byValInfo->slowPathTarget); |
1145 | |
1146 | patchBuffer.link(done, byValInfo->badTypeDoneTarget); |
1147 | |
1148 | byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB( |
1149 | m_codeBlock, patchBuffer, JITStubRoutinePtrTag, |
        "Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value());
1151 | |
1152 | MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel<JITStubRoutinePtrTag>(byValInfo->stubRoutine->code().code())); |
1153 | MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(operationHasIndexedPropertyGeneric)); |
1154 | } |
1155 | |
1156 | void JIT::emit_op_has_indexed_property(const Instruction* currentInstruction) |
1157 | { |
1158 | auto bytecode = currentInstruction->as<OpHasIndexedProperty>(); |
1159 | auto& metadata = bytecode.metadata(m_codeBlock); |
1160 | int dst = bytecode.m_dst.offset(); |
1161 | int base = bytecode.m_base.offset(); |
1162 | int property = bytecode.m_property.offset(); |
1163 | ArrayProfile* profile = &metadata.m_arrayProfile; |
1164 | ByValInfo* byValInfo = m_codeBlock->addByValInfo(); |
1165 | |
1166 | emitLoadPayload(base, regT0); |
1167 | emitJumpSlowCaseIfNotJSCell(base); |
1168 | |
1169 | emitLoad(property, regT3, regT1); |
1170 | addSlowCase(branchIfNotInt32(regT3)); |
1171 | |
    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it were a uint32 against m_vectorLength - which will always fail if the
    // number was negative, since m_vectorLength is always less than intmax (because the total allocation
    // size is always less than 4GB). As such, zero-extending will have been correct (and extending the
    // value to 64 bits is necessary since it's used in the address calculation). We zero-extend rather
    // than sign-extend since it makes it easier to re-tag the value in the slow case.
1178 | zeroExtend32ToPtr(regT1, regT1); |
1179 | |
1180 | emitArrayProfilingSiteWithCell(regT0, regT2, profile); |
1181 | and32(TrustedImm32(IndexingShapeMask), regT2); |
1182 | |
1183 | JITArrayMode mode = chooseArrayMode(profile); |
1184 | PatchableJump badType; |
1185 | |
1186 | // FIXME: Add support for other types like TypedArrays and Arguments. |
1187 | // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034. |
1188 | JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType); |
1189 | move(TrustedImm32(1), regT0); |
1190 | |
1191 | addSlowCase(badType); |
1192 | addSlowCase(slowCases); |
1193 | |
1194 | Label done = label(); |
1195 | |
1196 | emitStoreBool(dst, regT0); |
1197 | |
1198 | Label nextHotPath = label(); |
1199 | |
1200 | m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeIndex, PatchableJump(), badType, mode, profile, done, nextHotPath)); |
1201 | } |
1202 | |
1203 | void JIT::emitSlow_op_has_indexed_property(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
1204 | { |
1205 | linkAllSlowCases(iter); |
1206 | |
1207 | auto bytecode = currentInstruction->as<OpHasIndexedProperty>(); |
1208 | int dst = bytecode.m_dst.offset(); |
1209 | int base = bytecode.m_base.offset(); |
1210 | int property = bytecode.m_property.offset(); |
1211 | ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo; |
1212 | |
1213 | Label slowPath = label(); |
1214 | |
1215 | emitLoad(base, regT1, regT0); |
1216 | emitLoad(property, regT3, regT2); |
1217 | Call call = callOperation(operationHasIndexedPropertyDefault, dst, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2), byValInfo); |
1218 | |
1219 | m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath; |
1220 | m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call; |
1221 | m_byValInstructionIndex++; |
1222 | } |
1223 | |
1224 | void JIT::emit_op_get_direct_pname(const Instruction* currentInstruction) |
1225 | { |
1226 | auto bytecode = currentInstruction->as<OpGetDirectPname>(); |
1227 | int dst = bytecode.m_dst.offset(); |
1228 | int base = bytecode.m_base.offset(); |
1229 | int index = bytecode.m_index.offset(); |
1230 | int enumerator = bytecode.m_enumerator.offset(); |
1231 | |
1232 | // Check that base is a cell |
1233 | emitLoadPayload(base, regT0); |
1234 | emitJumpSlowCaseIfNotJSCell(base); |
1235 | |
1236 | // Check the structure |
1237 | emitLoadPayload(enumerator, regT1); |
1238 | load32(Address(regT0, JSCell::structureIDOffset()), regT2); |
1239 | addSlowCase(branch32(NotEqual, regT2, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset()))); |
1240 | |
1241 | // Compute the offset |
1242 | emitLoadPayload(index, regT2); |
    // If the index is less than the enumerator's cached inline capacity, this is an inline access.
1244 | Jump outOfLineAccess = branch32(AboveOrEqual, regT2, Address(regT1, JSPropertyNameEnumerator::cachedInlineCapacityOffset())); |
1245 | addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0); |
1246 | load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); |
1247 | load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); |
1248 | |
1249 | Jump done = jump(); |
1250 | |
1251 | // Otherwise it's out of line |
1252 | outOfLineAccess.link(this); |
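    // Out-of-line properties are stored at negative indices from the butterfly, so convert the
    // enumerator index into a negative offset past the inline capacity.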
1253 | loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0); |
1254 | sub32(Address(regT1, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT2); |
1255 | neg32(regT2); |
1256 | int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue); |
1257 | load32(BaseIndex(regT0, regT2, TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); |
1258 | load32(BaseIndex(regT0, regT2, TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); |
1259 | |
1260 | done.link(this); |
1261 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
1262 | emitStore(dst, regT1, regT0); |
1263 | } |
1264 | |
1265 | void JIT::emit_op_enumerator_structure_pname(const Instruction* currentInstruction) |
1266 | { |
1267 | auto bytecode = currentInstruction->as<OpEnumeratorStructurePname>(); |
1268 | int dst = bytecode.m_dst.offset(); |
1269 | int enumerator = bytecode.m_enumerator.offset(); |
1270 | int index = bytecode.m_index.offset(); |
1271 | |
1272 | emitLoadPayload(index, regT0); |
1273 | emitLoadPayload(enumerator, regT1); |
1274 | Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset())); |
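    // Falling through means the index is past the structure properties; produce jsNull() below
    // to signal the end of enumeration.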
1275 | |
1276 | move(TrustedImm32(JSValue::NullTag), regT2); |
1277 | move(TrustedImm32(0), regT0); |
1278 | |
1279 | Jump done = jump(); |
1280 | inBounds.link(this); |
1281 | |
1282 | loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1); |
1283 | loadPtr(BaseIndex(regT1, regT0, timesPtr()), regT0); |
1284 | move(TrustedImm32(JSValue::CellTag), regT2); |
1285 | |
1286 | done.link(this); |
1287 | emitStore(dst, regT2, regT0); |
1288 | } |
1289 | |
1290 | void JIT::emit_op_enumerator_generic_pname(const Instruction* currentInstruction) |
1291 | { |
1292 | auto bytecode = currentInstruction->as<OpEnumeratorGenericPname>(); |
1293 | int dst = bytecode.m_dst.offset(); |
1294 | int enumerator = bytecode.m_enumerator.offset(); |
1295 | int index = bytecode.m_index.offset(); |
1296 | |
1297 | emitLoadPayload(index, regT0); |
1298 | emitLoadPayload(enumerator, regT1); |
1299 | Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset())); |
1300 | |
1301 | move(TrustedImm32(JSValue::NullTag), regT2); |
1302 | move(TrustedImm32(0), regT0); |
1303 | |
1304 | Jump done = jump(); |
1305 | inBounds.link(this); |
1306 | |
1307 | loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1); |
1308 | loadPtr(BaseIndex(regT1, regT0, timesPtr()), regT0); |
1309 | move(TrustedImm32(JSValue::CellTag), regT2); |
1310 | |
1311 | done.link(this); |
1312 | emitStore(dst, regT2, regT0); |
1313 | } |
1314 | |
1315 | void JIT::emit_op_profile_type(const Instruction* currentInstruction) |
1316 | { |
1317 | auto bytecode = currentInstruction->as<OpProfileType>(); |
1318 | auto& metadata = bytecode.metadata(m_codeBlock); |
1319 | TypeLocation* cachedTypeLocation = metadata.m_typeLocation; |
1320 | int valueToProfile = bytecode.m_targetVirtualRegister.offset(); |
1321 | |
1322 | // Load payload in T0. Load tag in T3. |
1323 | emitLoadPayload(valueToProfile, regT0); |
1324 | emitLoadTag(valueToProfile, regT3); |
1325 | |
1326 | JumpList jumpToEnd; |
1327 | |
1328 | jumpToEnd.append(branchIfEmpty(regT3)); |
1329 | |
1330 | // Compile in a predictive type check, if possible, to see if we can skip writing to the log. |
1331 | // These typechecks are inlined to match those of the 32-bit JSValue type checks. |
1332 | if (cachedTypeLocation->m_lastSeenType == TypeUndefined) |
1333 | jumpToEnd.append(branchIfUndefined(regT3)); |
1334 | else if (cachedTypeLocation->m_lastSeenType == TypeNull) |
1335 | jumpToEnd.append(branchIfNull(regT3)); |
1336 | else if (cachedTypeLocation->m_lastSeenType == TypeBoolean) |
1337 | jumpToEnd.append(branchIfBoolean(regT3, InvalidGPRReg)); |
1338 | else if (cachedTypeLocation->m_lastSeenType == TypeAnyInt) |
1339 | jumpToEnd.append(branchIfInt32(regT3)); |
    else if (cachedTypeLocation->m_lastSeenType == TypeNumber)
        jumpToEnd.append(branchIfNumber(JSValueRegs(regT3, regT0), regT1));
    else if (cachedTypeLocation->m_lastSeenType == TypeString) {
1343 | Jump isNotCell = branchIfNotCell(regT3); |
1344 | jumpToEnd.append(branchIfString(regT0)); |
1345 | isNotCell.link(this); |
1346 | } |
1347 | |
1348 | // Load the type profiling log into T2. |
1349 | TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog(); |
1350 | move(TrustedImmPtr(cachedTypeProfilerLog), regT2); |
1351 | |
1352 | // Load the next log entry into T1. |
1353 | loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1); |
1354 | |
1355 | // Store the JSValue onto the log entry. |
1356 | store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); |
1357 | store32(regT3, Address(regT1, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); |
1358 | |
    // Store the structure ID if the value is a cell; otherwise store 0 in the log entry.
1360 | Jump notCell = branchIfNotCell(regT3); |
1361 | load32(Address(regT0, JSCell::structureIDOffset()), regT0); |
1362 | store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset())); |
1363 | Jump skipNotCell = jump(); |
1364 | notCell.link(this); |
1365 | store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset())); |
1366 | skipNotCell.link(this); |
1367 | |
1368 | // Store the typeLocation on the log entry. |
1369 | move(TrustedImmPtr(cachedTypeLocation), regT0); |
1370 | store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset())); |
1371 | |
1372 | // Increment the current log entry. |
1373 | addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1); |
1374 | store32(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset())); |
1375 | jumpToEnd.append(branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr()))); |
1376 | // Clear the log if we're at the end of the log. |
1377 | callOperation(operationProcessTypeProfilerLog, &vm()); |
1378 | |
1379 | jumpToEnd.link(this); |
1380 | } |
1381 | |
1382 | void JIT::emit_op_log_shadow_chicken_prologue(const Instruction* currentInstruction) |
1383 | { |
1384 | RELEASE_ASSERT(vm().shadowChicken()); |
1385 | updateTopCallFrame(); |
    static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if this is true.");
1387 | auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>(); |
1388 | GPRReg shadowPacketReg = regT0; |
1389 | GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register. |
1390 | GPRReg scratch2Reg = regT2; |
1391 | ensureShadowChickenPacket(vm(), shadowPacketReg, scratch1Reg, scratch2Reg); |
1392 | |
1393 | scratch1Reg = regT4; |
1394 | emitLoadPayload(bytecode.m_scope.offset(), regT3); |
1395 | logShadowChickenProloguePacket(shadowPacketReg, scratch1Reg, regT3); |
1396 | } |
1397 | |
1398 | void JIT::emit_op_log_shadow_chicken_tail(const Instruction* currentInstruction) |
1399 | { |
1400 | RELEASE_ASSERT(vm().shadowChicken()); |
1401 | updateTopCallFrame(); |
    static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if this is true.");
1403 | auto bytecode = currentInstruction->as<OpLogShadowChickenTail>(); |
1404 | GPRReg shadowPacketReg = regT0; |
1405 | GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register. |
1406 | GPRReg scratch2Reg = regT2; |
1407 | ensureShadowChickenPacket(vm(), shadowPacketReg, scratch1Reg, scratch2Reg); |
1408 | emitLoadPayload(bytecode.m_thisValue.offset(), regT2); |
1409 | emitLoadTag(bytecode.m_thisValue.offset(), regT1); |
1410 | JSValueRegs thisRegs(regT1, regT2); |
1411 | emitLoadPayload(bytecode.m_scope.offset(), regT3); |
1412 | logShadowChickenTailPacket(shadowPacketReg, thisRegs, regT3, m_codeBlock, CallSiteIndex(BytecodeIndex(bitwise_cast<uint32_t>(currentInstruction)))); |
1413 | } |
1414 | |
1415 | } // namespace JSC |
1416 | |
1417 | #endif // USE(JSVALUE32_64) |
1418 | #endif // ENABLE(JIT) |
1419 | |