1 | /* |
2 | * Copyright (C) 2009-2019 Apple Inc. All rights reserved. |
3 | * Copyright (C) 2010 Patrick Gansterer <[email protected]> |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
8 | * 1. Redistributions of source code must retain the above copyright |
9 | * notice, this list of conditions and the following disclaimer. |
10 | * 2. Redistributions in binary form must reproduce the above copyright |
11 | * notice, this list of conditions and the following disclaimer in the |
12 | * documentation and/or other materials provided with the distribution. |
13 | * |
14 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
15 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
17 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
18 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
19 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
20 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
21 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
22 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
24 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | */ |
26 | |
27 | #include "config.h" |
28 | #if ENABLE(JIT) |
29 | #include "JIT.h" |
30 | |
31 | #include "BasicBlockLocation.h" |
32 | #include "BytecodeGenerator.h" |
33 | #include "Exception.h" |
34 | #include "Heap.h" |
35 | #include "InterpreterInlines.h" |
36 | #include "JITInlines.h" |
37 | #include "JSArray.h" |
38 | #include "JSCast.h" |
39 | #include "JSFunction.h" |
40 | #include "JSPropertyNameEnumerator.h" |
41 | #include "LinkBuffer.h" |
42 | #include "MaxFrameExtentForSlowPathCall.h" |
43 | #include "OpcodeInlines.h" |
44 | #include "SlowPathCall.h" |
45 | #include "SuperSampler.h" |
46 | #include "ThunkGenerators.h" |
47 | #include "TypeLocation.h" |
48 | #include "TypeProfilerLog.h" |
49 | #include "VirtualRegister.h" |
50 | #include "Watchdog.h" |
51 | |
52 | namespace JSC { |
53 | |
54 | #if USE(JSVALUE64) |
55 | |
56 | void JIT::emit_op_mov(const Instruction* currentInstruction) |
57 | { |
58 | auto bytecode = currentInstruction->as<OpMov>(); |
59 | int dst = bytecode.m_dst.offset(); |
60 | int src = bytecode.m_src.offset(); |
61 | |
62 | if (m_codeBlock->isConstantRegisterIndex(src)) { |
63 | JSValue value = m_codeBlock->getConstant(src); |
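        // Non-number constants are emitted as trusted immediates; number constants
        // go through Imm64 so they stay eligible for JIT constant blinding.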
64 | if (!value.isNumber()) |
65 | store64(TrustedImm64(JSValue::encode(value)), addressFor(dst)); |
66 | else |
67 | store64(Imm64(JSValue::encode(value)), addressFor(dst)); |
68 | return; |
69 | } |
70 | |
71 | load64(addressFor(src), regT0); |
72 | store64(regT0, addressFor(dst)); |
73 | } |
74 | |
75 | |
76 | void JIT::emit_op_end(const Instruction* currentInstruction) |
77 | { |
78 | auto bytecode = currentInstruction->as<OpEnd>(); |
79 | RELEASE_ASSERT(returnValueGPR != callFrameRegister); |
80 | emitGetVirtualRegister(bytecode.m_value.offset(), returnValueGPR); |
81 | emitRestoreCalleeSaves(); |
82 | emitFunctionEpilogue(); |
83 | ret(); |
84 | } |
85 | |
86 | void JIT::emit_op_jmp(const Instruction* currentInstruction) |
87 | { |
88 | auto bytecode = currentInstruction->as<OpJmp>(); |
89 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
90 | addJump(jump(), target); |
91 | } |
92 | |
93 | void JIT::emit_op_new_object(const Instruction* currentInstruction) |
94 | { |
95 | auto bytecode = currentInstruction->as<OpNewObject>(); |
96 | auto& metadata = bytecode.metadata(m_codeBlock); |
97 | Structure* structure = metadata.m_objectAllocationProfile.structure(); |
98 | size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity()); |
99 | Allocator allocator = allocatorForNonVirtualConcurrently<JSFinalObject>(*m_vm, allocationSize, AllocatorForMode::AllocatorIfExists); |
100 | |
101 | RegisterID resultReg = regT0; |
102 | RegisterID allocatorReg = regT1; |
103 | RegisterID scratchReg = regT2; |
104 | |
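    // If no allocator exists for objects of this size, always take the slow path;
    // otherwise allocate inline and fall back to the slow path only if the allocation fails.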
105 | if (!allocator) |
106 | addSlowCase(jump()); |
107 | else { |
108 | JumpList slowCases; |
109 | auto butterfly = TrustedImmPtr(nullptr); |
110 | emitAllocateJSObject(resultReg, JITAllocator::constant(allocator), allocatorReg, TrustedImmPtr(structure), butterfly, scratchReg, slowCases); |
111 | emitInitializeInlineStorage(resultReg, structure->inlineCapacity()); |
112 | addSlowCase(slowCases); |
113 | emitPutVirtualRegister(bytecode.m_dst.offset()); |
114 | } |
115 | } |
116 | |
117 | void JIT::emitSlow_op_new_object(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
118 | { |
119 | linkAllSlowCases(iter); |
120 | |
121 | auto bytecode = currentInstruction->as<OpNewObject>(); |
122 | auto& metadata = bytecode.metadata(m_codeBlock); |
123 | int dst = bytecode.m_dst.offset(); |
124 | Structure* structure = metadata.m_objectAllocationProfile.structure(); |
125 | callOperation(operationNewObject, TrustedImmPtr(&vm()), structure); |
126 | emitStoreCell(dst, returnValueGPR); |
127 | } |
128 | |
129 | void JIT::emit_op_overrides_has_instance(const Instruction* currentInstruction) |
130 | { |
131 | auto bytecode = currentInstruction->as<OpOverridesHasInstance>(); |
132 | int dst = bytecode.m_dst.offset(); |
133 | int constructor = bytecode.m_constructor.offset(); |
134 | int hasInstanceValue = bytecode.m_hasInstanceValue.offset(); |
135 | |
136 | emitGetVirtualRegister(hasInstanceValue, regT0); |
137 | |
138 | // We don't jump if we know what Symbol.hasInstance would do. |
    Jump customHasInstanceValue = branchPtr(NotEqual, regT0, TrustedImmPtr(m_codeBlock->globalObject()->functionProtoHasInstanceSymbolFunction()));
140 | |
141 | emitGetVirtualRegister(constructor, regT0); |
142 | |
    // Check that the constructor has the ImplementsDefaultHasInstance flag, i.e. that it is not a C API user nor a bound function.
144 | test8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance), regT0); |
145 | boxBoolean(regT0, JSValueRegs { regT0 }); |
146 | Jump done = jump(); |
147 | |
    customHasInstanceValue.link(this);
149 | move(TrustedImm32(JSValue::ValueTrue), regT0); |
150 | |
151 | done.link(this); |
152 | emitPutVirtualRegister(dst); |
153 | } |
154 | |
155 | void JIT::emit_op_instanceof(const Instruction* currentInstruction) |
156 | { |
157 | auto bytecode = currentInstruction->as<OpInstanceof>(); |
158 | int dst = bytecode.m_dst.offset(); |
159 | int value = bytecode.m_value.offset(); |
160 | int proto = bytecode.m_prototype.offset(); |
161 | |
    // Load the operands value and proto into registers.
    // regT0 is reserved for the result.
164 | emitGetVirtualRegister(value, regT2); |
165 | emitGetVirtualRegister(proto, regT1); |
166 | |
    // Check that value and proto are cells. baseVal must be a cell - this is checked by the get_by_id for Symbol.hasInstance.
168 | emitJumpSlowCaseIfNotJSCell(regT2, value); |
169 | emitJumpSlowCaseIfNotJSCell(regT1, proto); |
170 | |
171 | JITInstanceOfGenerator gen( |
172 | m_codeBlock, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), |
173 | RegisterSet::stubUnavailableRegisters(), |
174 | regT0, // result |
175 | regT2, // value |
176 | regT1, // proto |
177 | regT3, regT4); // scratch |
178 | gen.generateFastPath(*this); |
179 | m_instanceOfs.append(gen); |
180 | |
181 | emitPutVirtualRegister(dst); |
182 | } |
183 | |
184 | void JIT::emitSlow_op_instanceof(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
185 | { |
186 | linkAllSlowCases(iter); |
187 | |
188 | auto bytecode = currentInstruction->as<OpInstanceof>(); |
189 | int resultVReg = bytecode.m_dst.offset(); |
190 | |
191 | JITInstanceOfGenerator& gen = m_instanceOfs[m_instanceOfIndex++]; |
192 | |
193 | Label coldPathBegin = label(); |
194 | Call call = callOperation(operationInstanceOfOptimize, resultVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT2, regT1); |
195 | gen.reportSlowPathCall(coldPathBegin, call); |
196 | } |
197 | |
198 | void JIT::emit_op_instanceof_custom(const Instruction*) |
199 | { |
200 | // This always goes to slow path since we expect it to be rare. |
201 | addSlowCase(jump()); |
202 | } |
203 | |
204 | void JIT::emit_op_is_empty(const Instruction* currentInstruction) |
205 | { |
206 | auto bytecode = currentInstruction->as<OpIsEmpty>(); |
207 | int dst = bytecode.m_dst.offset(); |
208 | int value = bytecode.m_operand.offset(); |
209 | |
210 | emitGetVirtualRegister(value, regT0); |
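    // The empty value encodes as zero, so one 64-bit compare detects it.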
211 | compare64(Equal, regT0, TrustedImm32(JSValue::encode(JSValue())), regT0); |
212 | |
213 | boxBoolean(regT0, JSValueRegs { regT0 }); |
214 | emitPutVirtualRegister(dst); |
215 | } |
216 | |
217 | void JIT::emit_op_is_undefined(const Instruction* currentInstruction) |
218 | { |
219 | auto bytecode = currentInstruction->as<OpIsUndefined>(); |
220 | int dst = bytecode.m_dst.offset(); |
221 | int value = bytecode.m_operand.offset(); |
222 | |
223 | emitGetVirtualRegister(value, regT0); |
224 | Jump isCell = branchIfCell(regT0); |
225 | |
226 | compare64(Equal, regT0, TrustedImm32(JSValue::ValueUndefined), regT0); |
227 | Jump done = jump(); |
228 | |
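    // A cell reads as undefined only if it masquerades as undefined and its
    // structure's global object is this code block's global object.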
229 | isCell.link(this); |
230 | Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); |
231 | move(TrustedImm32(0), regT0); |
232 | Jump notMasqueradesAsUndefined = jump(); |
233 | |
234 | isMasqueradesAsUndefined.link(this); |
235 | emitLoadStructure(vm(), regT0, regT1, regT2); |
236 | move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); |
237 | loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1); |
238 | comparePtr(Equal, regT0, regT1, regT0); |
239 | |
240 | notMasqueradesAsUndefined.link(this); |
241 | done.link(this); |
242 | boxBoolean(regT0, JSValueRegs { regT0 }); |
243 | emitPutVirtualRegister(dst); |
244 | } |
245 | |
246 | void JIT::emit_op_is_undefined_or_null(const Instruction* currentInstruction) |
247 | { |
248 | auto bytecode = currentInstruction->as<OpIsUndefinedOrNull>(); |
249 | int dst = bytecode.m_dst.offset(); |
250 | int value = bytecode.m_operand.offset(); |
251 | |
252 | emitGetVirtualRegister(value, regT0); |
253 | |
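    // Undefined differs from null only in the UndefinedTag bit, so masking that bit
    // off folds both values onto null and a single comparison covers them.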
254 | and64(TrustedImm32(~JSValue::UndefinedTag), regT0); |
255 | compare64(Equal, regT0, TrustedImm32(JSValue::ValueNull), regT0); |
256 | |
257 | boxBoolean(regT0, JSValueRegs { regT0 }); |
258 | emitPutVirtualRegister(dst); |
259 | } |
260 | |
261 | void JIT::emit_op_is_boolean(const Instruction* currentInstruction) |
262 | { |
263 | auto bytecode = currentInstruction->as<OpIsBoolean>(); |
264 | int dst = bytecode.m_dst.offset(); |
265 | int value = bytecode.m_operand.offset(); |
266 | |
267 | emitGetVirtualRegister(value, regT0); |
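    // Booleans encode as ValueFalse and ValueTrue, which differ only in the low bit.
    // XORing with ValueFalse maps them to 0 or 1; any other value keeps higher bits set.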
268 | xor64(TrustedImm32(JSValue::ValueFalse), regT0); |
269 | test64(Zero, regT0, TrustedImm32(static_cast<int32_t>(~1)), regT0); |
270 | boxBoolean(regT0, JSValueRegs { regT0 }); |
271 | emitPutVirtualRegister(dst); |
272 | } |
273 | |
274 | void JIT::emit_op_is_number(const Instruction* currentInstruction) |
275 | { |
276 | auto bytecode = currentInstruction->as<OpIsNumber>(); |
277 | int dst = bytecode.m_dst.offset(); |
278 | int value = bytecode.m_operand.offset(); |
279 | |
280 | emitGetVirtualRegister(value, regT0); |
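    // A value is a number iff any of the number tag bits are set: int32s carry the full
    // tag and doubles a non-zero offset, while cells and other immediates leave them clear.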
281 | test64(NonZero, regT0, numberTagRegister, regT0); |
282 | boxBoolean(regT0, JSValueRegs { regT0 }); |
283 | emitPutVirtualRegister(dst); |
284 | } |
285 | |
286 | void JIT::emit_op_is_cell_with_type(const Instruction* currentInstruction) |
287 | { |
288 | auto bytecode = currentInstruction->as<OpIsCellWithType>(); |
289 | int dst = bytecode.m_dst.offset(); |
290 | int value = bytecode.m_operand.offset(); |
291 | int type = bytecode.m_type; |
292 | |
293 | emitGetVirtualRegister(value, regT0); |
294 | Jump isNotCell = branchIfNotCell(regT0); |
295 | |
296 | compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(type), regT0); |
297 | boxBoolean(regT0, JSValueRegs { regT0 }); |
298 | Jump done = jump(); |
299 | |
300 | isNotCell.link(this); |
301 | move(TrustedImm32(JSValue::ValueFalse), regT0); |
302 | |
303 | done.link(this); |
304 | emitPutVirtualRegister(dst); |
305 | } |
306 | |
307 | void JIT::emit_op_is_object(const Instruction* currentInstruction) |
308 | { |
309 | auto bytecode = currentInstruction->as<OpIsObject>(); |
310 | int dst = bytecode.m_dst.offset(); |
311 | int value = bytecode.m_operand.offset(); |
312 | |
313 | emitGetVirtualRegister(value, regT0); |
314 | Jump isNotCell = branchIfNotCell(regT0); |
315 | |
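    // Object types occupy the top range of JSType values, so a single unsigned
    // comparison against ObjectType covers all of them.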
316 | compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0); |
317 | boxBoolean(regT0, JSValueRegs { regT0 }); |
318 | Jump done = jump(); |
319 | |
320 | isNotCell.link(this); |
321 | move(TrustedImm32(JSValue::ValueFalse), regT0); |
322 | |
323 | done.link(this); |
324 | emitPutVirtualRegister(dst); |
325 | } |
326 | |
327 | void JIT::emit_op_ret(const Instruction* currentInstruction) |
328 | { |
329 | ASSERT(callFrameRegister != regT1); |
330 | ASSERT(regT1 != returnValueGPR); |
331 | ASSERT(returnValueGPR != callFrameRegister); |
332 | |
    // Return the result in the return value register.
334 | auto bytecode = currentInstruction->as<OpRet>(); |
335 | emitGetVirtualRegister(bytecode.m_value.offset(), returnValueGPR); |
336 | |
337 | checkStackPointerAlignment(); |
338 | emitRestoreCalleeSaves(); |
339 | emitFunctionEpilogue(); |
340 | ret(); |
341 | } |
342 | |
343 | void JIT::emit_op_to_primitive(const Instruction* currentInstruction) |
344 | { |
345 | auto bytecode = currentInstruction->as<OpToPrimitive>(); |
346 | int dst = bytecode.m_dst.offset(); |
347 | int src = bytecode.m_src.offset(); |
348 | |
349 | emitGetVirtualRegister(src, regT0); |
350 | |
351 | Jump isImm = branchIfNotCell(regT0); |
352 | addSlowCase(branchIfObject(regT0)); |
353 | isImm.link(this); |
354 | |
355 | if (dst != src) |
        emitPutVirtualRegister(dst);
}
359 | |
360 | void JIT::emit_op_set_function_name(const Instruction* currentInstruction) |
361 | { |
362 | auto bytecode = currentInstruction->as<OpSetFunctionName>(); |
363 | emitGetVirtualRegister(bytecode.m_function.offset(), regT0); |
364 | emitGetVirtualRegister(bytecode.m_name.offset(), regT1); |
365 | callOperation(operationSetFunctionName, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1); |
366 | } |
367 | |
368 | void JIT::emit_op_not(const Instruction* currentInstruction) |
369 | { |
370 | auto bytecode = currentInstruction->as<OpNot>(); |
371 | emitGetVirtualRegister(bytecode.m_operand.offset(), regT0); |
372 | |
373 | // Invert against JSValue(false); if the value was tagged as a boolean, then all bits will be |
374 | // clear other than the low bit (which will be 0 or 1 for false or true inputs respectively). |
375 | // Then invert against JSValue(true), which will add the tag back in, and flip the low bit. |
376 | xor64(TrustedImm32(JSValue::ValueFalse), regT0); |
377 | addSlowCase(branchTestPtr(NonZero, regT0, TrustedImm32(static_cast<int32_t>(~1)))); |
378 | xor64(TrustedImm32(JSValue::ValueTrue), regT0); |
379 | |
380 | emitPutVirtualRegister(bytecode.m_dst.offset()); |
381 | } |
382 | |
383 | void JIT::emit_op_jfalse(const Instruction* currentInstruction) |
384 | { |
385 | auto bytecode = currentInstruction->as<OpJfalse>(); |
386 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
387 | |
388 | GPRReg value = regT0; |
389 | GPRReg scratch1 = regT1; |
390 | GPRReg scratch2 = regT2; |
391 | bool shouldCheckMasqueradesAsUndefined = true; |
392 | |
393 | emitGetVirtualRegister(bytecode.m_condition.offset(), value); |
394 | addJump(branchIfFalsey(vm(), JSValueRegs(value), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target); |
395 | } |
396 | |
397 | void JIT::emit_op_jeq_null(const Instruction* currentInstruction) |
398 | { |
399 | auto bytecode = currentInstruction->as<OpJeqNull>(); |
400 | int src = bytecode.m_value.offset(); |
401 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
402 | |
403 | emitGetVirtualRegister(src, regT0); |
404 | Jump isImmediate = branchIfNotCell(regT0); |
405 | |
    // First, handle JSCell cases - check the MasqueradesAsUndefined bit in the cell's type info flags.
407 | Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); |
408 | emitLoadStructure(vm(), regT0, regT2, regT1); |
409 | move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); |
410 | addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target); |
411 | Jump masqueradesGlobalObjectIsForeign = jump(); |
412 | |
413 | // Now handle the immediate cases - undefined & null |
414 | isImmediate.link(this); |
415 | and64(TrustedImm32(~JSValue::UndefinedTag), regT0); |
416 | addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNull()))), target); |
417 | |
418 | isNotMasqueradesAsUndefined.link(this); |
419 | masqueradesGlobalObjectIsForeign.link(this); |
420 | }; |
421 | void JIT::emit_op_jneq_null(const Instruction* currentInstruction) |
422 | { |
423 | auto bytecode = currentInstruction->as<OpJneqNull>(); |
424 | int src = bytecode.m_value.offset(); |
425 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
426 | |
427 | emitGetVirtualRegister(src, regT0); |
428 | Jump isImmediate = branchIfNotCell(regT0); |
429 | |
    // First, handle JSCell cases - check the MasqueradesAsUndefined bit in the cell's type info flags.
431 | addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target); |
432 | emitLoadStructure(vm(), regT0, regT2, regT1); |
433 | move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); |
434 | addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target); |
435 | Jump wasNotImmediate = jump(); |
436 | |
437 | // Now handle the immediate cases - undefined & null |
438 | isImmediate.link(this); |
439 | and64(TrustedImm32(~JSValue::UndefinedTag), regT0); |
440 | addJump(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsNull()))), target); |
441 | |
442 | wasNotImmediate.link(this); |
443 | } |
444 | |
445 | void JIT::emit_op_jundefined_or_null(const Instruction* currentInstruction) |
446 | { |
447 | auto bytecode = currentInstruction->as<OpJundefinedOrNull>(); |
448 | int value = bytecode.m_value.offset(); |
449 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
450 | |
451 | emitGetVirtualRegister(value, regT0); |
452 | |
453 | and64(TrustedImm32(~JSValue::UndefinedTag), regT0); |
454 | addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNull()))), target); |
455 | } |
456 | |
457 | void JIT::emit_op_jnundefined_or_null(const Instruction* currentInstruction) |
458 | { |
459 | auto bytecode = currentInstruction->as<OpJnundefinedOrNull>(); |
460 | int value = bytecode.m_value.offset(); |
461 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
462 | |
463 | emitGetVirtualRegister(value, regT0); |
464 | |
465 | and64(TrustedImm32(~JSValue::UndefinedTag), regT0); |
466 | addJump(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsNull()))), target); |
467 | } |
468 | |
469 | void JIT::emit_op_jneq_ptr(const Instruction* currentInstruction) |
470 | { |
471 | auto bytecode = currentInstruction->as<OpJneqPtr>(); |
472 | auto& metadata = bytecode.metadata(m_codeBlock); |
473 | int src = bytecode.m_value.offset(); |
474 | JSValue specialPointer = getConstantOperand(bytecode.m_specialPointer.offset()); |
475 | ASSERT(specialPointer.isCell()); |
476 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
477 | |
478 | emitGetVirtualRegister(src, regT0); |
479 | CCallHelpers::Jump equal = branchPtr(Equal, regT0, TrustedImmPtr(specialPointer.asCell())); |
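    // Record that this site has jumped at least once, so upper tiers do not assume
    // the operand is always the special pointer.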
480 | store8(TrustedImm32(1), &metadata.m_hasJumped); |
481 | addJump(jump(), target); |
482 | equal.link(this); |
483 | } |
484 | |
485 | void JIT::emit_op_eq(const Instruction* currentInstruction) |
486 | { |
487 | auto bytecode = currentInstruction->as<OpEq>(); |
488 | emitGetVirtualRegisters(bytecode.m_lhs.offset(), regT0, bytecode.m_rhs.offset(), regT1); |
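    // The fast path only handles two int32 operands; everything else takes the slow path.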
489 | emitJumpSlowCaseIfNotInt(regT0, regT1, regT2); |
490 | compare32(Equal, regT1, regT0, regT0); |
491 | boxBoolean(regT0, JSValueRegs { regT0 }); |
492 | emitPutVirtualRegister(bytecode.m_dst.offset()); |
493 | } |
494 | |
495 | void JIT::emit_op_jeq(const Instruction* currentInstruction) |
496 | { |
497 | auto bytecode = currentInstruction->as<OpJeq>(); |
498 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
499 | emitGetVirtualRegisters(bytecode.m_lhs.offset(), regT0, bytecode.m_rhs.offset(), regT1); |
500 | emitJumpSlowCaseIfNotInt(regT0, regT1, regT2); |
501 | addJump(branch32(Equal, regT0, regT1), target); |
502 | } |
503 | |
504 | void JIT::emit_op_jtrue(const Instruction* currentInstruction) |
505 | { |
506 | auto bytecode = currentInstruction->as<OpJtrue>(); |
507 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
508 | |
509 | GPRReg value = regT0; |
510 | GPRReg scratch1 = regT1; |
511 | GPRReg scratch2 = regT2; |
512 | bool shouldCheckMasqueradesAsUndefined = true; |
513 | emitGetVirtualRegister(bytecode.m_condition.offset(), value); |
514 | addJump(branchIfTruthy(vm(), JSValueRegs(value), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target); |
515 | } |
516 | |
517 | void JIT::emit_op_neq(const Instruction* currentInstruction) |
518 | { |
519 | auto bytecode = currentInstruction->as<OpNeq>(); |
520 | emitGetVirtualRegisters(bytecode.m_lhs.offset(), regT0, bytecode.m_rhs.offset(), regT1); |
521 | emitJumpSlowCaseIfNotInt(regT0, regT1, regT2); |
522 | compare32(NotEqual, regT1, regT0, regT0); |
523 | boxBoolean(regT0, JSValueRegs { regT0 }); |
524 | |
525 | emitPutVirtualRegister(bytecode.m_dst.offset()); |
526 | } |
527 | |
528 | void JIT::emit_op_jneq(const Instruction* currentInstruction) |
529 | { |
530 | auto bytecode = currentInstruction->as<OpJneq>(); |
531 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
532 | emitGetVirtualRegisters(bytecode.m_lhs.offset(), regT0, bytecode.m_rhs.offset(), regT1); |
533 | emitJumpSlowCaseIfNotInt(regT0, regT1, regT2); |
534 | addJump(branch32(NotEqual, regT0, regT1), target); |
535 | } |
536 | |
537 | void JIT::emit_op_throw(const Instruction* currentInstruction) |
538 | { |
539 | auto bytecode = currentInstruction->as<OpThrow>(); |
540 | ASSERT(regT0 == returnValueGPR); |
541 | copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm().topEntryFrame); |
542 | emitGetVirtualRegister(bytecode.m_value.offset(), regT0); |
543 | callOperationNoExceptionCheck(operationThrow, TrustedImmPtr(m_codeBlock->globalObject()), regT0); |
544 | jumpToExceptionHandler(vm()); |
545 | } |
546 | |
547 | template<typename Op> |
548 | void JIT::compileOpStrictEq(const Instruction* currentInstruction, CompileOpStrictEqType type) |
549 | { |
550 | auto bytecode = currentInstruction->as<Op>(); |
551 | int dst = bytecode.m_dst.offset(); |
552 | int src1 = bytecode.m_lhs.offset(); |
553 | int src2 = bytecode.m_rhs.offset(); |
554 | |
555 | emitGetVirtualRegisters(src1, regT0, src2, regT1); |
556 | |
557 | // Jump slow if both are cells (to cover strings). |
558 | move(regT0, regT2); |
559 | or64(regT1, regT2); |
560 | addSlowCase(branchIfCell(regT2)); |
561 | |
562 | // Jump slow if either is a double. First test if it's an integer, which is fine, and then test |
563 | // if it's a double. |
564 | Jump leftOK = branchIfInt32(regT0); |
565 | addSlowCase(branchIfNumber(regT0)); |
566 | leftOK.link(this); |
567 | Jump rightOK = branchIfInt32(regT1); |
568 | addSlowCase(branchIfNumber(regT1)); |
569 | rightOK.link(this); |
570 | |
571 | if (type == CompileOpStrictEqType::StrictEq) |
572 | compare64(Equal, regT1, regT0, regT0); |
573 | else |
574 | compare64(NotEqual, regT1, regT0, regT0); |
575 | boxBoolean(regT0, JSValueRegs { regT0 }); |
576 | |
577 | emitPutVirtualRegister(dst); |
578 | } |
579 | |
580 | void JIT::emit_op_stricteq(const Instruction* currentInstruction) |
581 | { |
582 | compileOpStrictEq<OpStricteq>(currentInstruction, CompileOpStrictEqType::StrictEq); |
583 | } |
584 | |
585 | void JIT::emit_op_nstricteq(const Instruction* currentInstruction) |
586 | { |
587 | compileOpStrictEq<OpNstricteq>(currentInstruction, CompileOpStrictEqType::NStrictEq); |
588 | } |
589 | |
590 | template<typename Op> |
591 | void JIT::compileOpStrictEqJump(const Instruction* currentInstruction, CompileOpStrictEqType type) |
592 | { |
593 | auto bytecode = currentInstruction->as<Op>(); |
594 | int target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
595 | int src1 = bytecode.m_lhs.offset(); |
596 | int src2 = bytecode.m_rhs.offset(); |
597 | |
598 | emitGetVirtualRegisters(src1, regT0, src2, regT1); |
599 | |
600 | // Jump slow if both are cells (to cover strings). |
601 | move(regT0, regT2); |
602 | or64(regT1, regT2); |
603 | addSlowCase(branchIfCell(regT2)); |
604 | |
605 | // Jump slow if either is a double. First test if it's an integer, which is fine, and then test |
606 | // if it's a double. |
607 | Jump leftOK = branchIfInt32(regT0); |
608 | addSlowCase(branchIfNumber(regT0)); |
609 | leftOK.link(this); |
610 | Jump rightOK = branchIfInt32(regT1); |
611 | addSlowCase(branchIfNumber(regT1)); |
612 | rightOK.link(this); |
613 | |
614 | if (type == CompileOpStrictEqType::StrictEq) |
615 | addJump(branch64(Equal, regT1, regT0), target); |
616 | else |
617 | addJump(branch64(NotEqual, regT1, regT0), target); |
618 | } |
619 | |
620 | void JIT::emit_op_jstricteq(const Instruction* currentInstruction) |
621 | { |
622 | compileOpStrictEqJump<OpJstricteq>(currentInstruction, CompileOpStrictEqType::StrictEq); |
623 | } |
624 | |
625 | void JIT::emit_op_jnstricteq(const Instruction* currentInstruction) |
626 | { |
627 | compileOpStrictEqJump<OpJnstricteq>(currentInstruction, CompileOpStrictEqType::NStrictEq); |
628 | } |
629 | |
630 | void JIT::emitSlow_op_jstricteq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
631 | { |
632 | linkAllSlowCases(iter); |
633 | |
634 | auto bytecode = currentInstruction->as<OpJstricteq>(); |
635 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
636 | callOperation(operationCompareStrictEq, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1); |
637 | emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target); |
638 | } |
639 | |
640 | void JIT::emitSlow_op_jnstricteq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
641 | { |
642 | linkAllSlowCases(iter); |
643 | |
644 | auto bytecode = currentInstruction->as<OpJnstricteq>(); |
645 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
646 | callOperation(operationCompareStrictEq, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1); |
647 | emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target); |
648 | } |
649 | |
650 | void JIT::emit_op_to_number(const Instruction* currentInstruction) |
651 | { |
652 | auto bytecode = currentInstruction->as<OpToNumber>(); |
653 | int dstVReg = bytecode.m_dst.offset(); |
654 | int srcVReg = bytecode.m_operand.offset(); |
655 | emitGetVirtualRegister(srcVReg, regT0); |
656 | |
657 | addSlowCase(branchIfNotNumber(regT0)); |
658 | |
659 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
660 | if (srcVReg != dstVReg) |
661 | emitPutVirtualRegister(dstVReg); |
662 | } |
663 | |
664 | void JIT::emit_op_to_numeric(const Instruction* currentInstruction) |
665 | { |
666 | auto bytecode = currentInstruction->as<OpToNumeric>(); |
667 | int dstVReg = bytecode.m_dst.offset(); |
668 | int srcVReg = bytecode.m_operand.offset(); |
669 | emitGetVirtualRegister(srcVReg, regT0); |
670 | |
671 | Jump isNotCell = branchIfNotCell(regT0); |
672 | addSlowCase(branchIfNotBigInt(regT0)); |
673 | Jump isBigInt = jump(); |
674 | |
675 | isNotCell.link(this); |
676 | addSlowCase(branchIfNotNumber(regT0)); |
677 | isBigInt.link(this); |
678 | |
679 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
680 | if (srcVReg != dstVReg) |
681 | emitPutVirtualRegister(dstVReg); |
682 | } |
683 | |
684 | void JIT::emit_op_to_string(const Instruction* currentInstruction) |
685 | { |
686 | auto bytecode = currentInstruction->as<OpToString>(); |
687 | int srcVReg = bytecode.m_operand.offset(); |
688 | emitGetVirtualRegister(srcVReg, regT0); |
689 | |
690 | addSlowCase(branchIfNotCell(regT0)); |
691 | addSlowCase(branchIfNotString(regT0)); |
692 | |
693 | emitPutVirtualRegister(bytecode.m_dst.offset()); |
694 | } |
695 | |
696 | void JIT::emit_op_to_object(const Instruction* currentInstruction) |
697 | { |
698 | auto bytecode = currentInstruction->as<OpToObject>(); |
699 | int dstVReg = bytecode.m_dst.offset(); |
700 | int srcVReg = bytecode.m_operand.offset(); |
701 | emitGetVirtualRegister(srcVReg, regT0); |
702 | |
703 | addSlowCase(branchIfNotCell(regT0)); |
704 | addSlowCase(branchIfNotObject(regT0)); |
705 | |
706 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
707 | if (srcVReg != dstVReg) |
708 | emitPutVirtualRegister(dstVReg); |
709 | } |
710 | |
711 | void JIT::emit_op_catch(const Instruction* currentInstruction) |
712 | { |
713 | auto bytecode = currentInstruction->as<OpCatch>(); |
714 | |
715 | restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(vm().topEntryFrame); |
716 | |
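    // Unwinding stashed the frame for this handler in VM::callFrameForCatch; reload it,
    // clear the slot, and recompute the stack pointer for this code block.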
717 | move(TrustedImmPtr(m_vm), regT3); |
718 | load64(Address(regT3, VM::callFrameForCatchOffset()), callFrameRegister); |
719 | storePtr(TrustedImmPtr(nullptr), Address(regT3, VM::callFrameForCatchOffset())); |
720 | |
721 | addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister); |
722 | |
723 | callOperationNoExceptionCheck(operationCheckIfExceptionIsUncatchableAndNotifyProfiler, TrustedImmPtr(&vm())); |
724 | Jump isCatchableException = branchTest32(Zero, returnValueGPR); |
725 | jumpToExceptionHandler(vm()); |
726 | isCatchableException.link(this); |
727 | |
728 | move(TrustedImmPtr(m_vm), regT3); |
729 | load64(Address(regT3, VM::exceptionOffset()), regT0); |
730 | store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, VM::exceptionOffset())); |
731 | emitPutVirtualRegister(bytecode.m_exception.offset()); |
732 | |
733 | load64(Address(regT0, Exception::valueOffset()), regT0); |
734 | emitPutVirtualRegister(bytecode.m_thrownValue.offset()); |
735 | |
736 | #if ENABLE(DFG_JIT) |
737 | // FIXME: consider inline caching the process of doing OSR entry, including |
738 | // argument type proofs, storing locals to the buffer, etc |
739 | // https://bugs.webkit.org/show_bug.cgi?id=175598 |
740 | |
741 | auto& metadata = bytecode.metadata(m_codeBlock); |
742 | ValueProfileAndOperandBuffer* buffer = metadata.m_buffer; |
743 | if (buffer || !shouldEmitProfiling()) |
744 | callOperation(operationTryOSREnterAtCatch, &vm(), m_bytecodeIndex.asBits()); |
745 | else |
746 | callOperation(operationTryOSREnterAtCatchAndValueProfile, &vm(), m_bytecodeIndex.asBits()); |
747 | auto skipOSREntry = branchTestPtr(Zero, returnValueGPR); |
748 | emitRestoreCalleeSaves(); |
749 | farJump(returnValueGPR, ExceptionHandlerPtrTag); |
750 | skipOSREntry.link(this); |
751 | if (buffer && shouldEmitProfiling()) { |
752 | buffer->forEach([&] (ValueProfileAndOperand& profile) { |
753 | JSValueRegs regs(regT0); |
754 | emitGetVirtualRegister(profile.m_operand, regs); |
755 | emitValueProfilingSite(static_cast<ValueProfile&>(profile)); |
756 | }); |
757 | } |
758 | #endif // ENABLE(DFG_JIT) |
759 | } |
760 | |
761 | void JIT::emit_op_identity_with_profile(const Instruction*) |
762 | { |
763 | // We don't need to do anything here... |
764 | } |
765 | |
766 | void JIT::emit_op_get_parent_scope(const Instruction* currentInstruction) |
767 | { |
768 | auto bytecode = currentInstruction->as<OpGetParentScope>(); |
769 | int currentScope = bytecode.m_scope.offset(); |
770 | emitGetVirtualRegister(currentScope, regT0); |
771 | loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0); |
772 | emitStoreCell(bytecode.m_dst.offset(), regT0); |
773 | } |
774 | |
775 | void JIT::emit_op_switch_imm(const Instruction* currentInstruction) |
776 | { |
777 | auto bytecode = currentInstruction->as<OpSwitchImm>(); |
778 | size_t tableIndex = bytecode.m_tableIndex; |
779 | unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset); |
780 | unsigned scrutinee = bytecode.m_scrutinee.offset(); |
781 | |
782 | // create jump table for switch destinations, track this switch statement. |
783 | SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex); |
784 | m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate)); |
785 | jumpTable->ensureCTITable(); |
786 | |
787 | emitGetVirtualRegister(scrutinee, regT0); |
788 | callOperation(operationSwitchImmWithUnknownKeyType, TrustedImmPtr(&vm()), regT0, tableIndex); |
789 | farJump(returnValueGPR, JSSwitchPtrTag); |
790 | } |
791 | |
792 | void JIT::emit_op_switch_char(const Instruction* currentInstruction) |
793 | { |
794 | auto bytecode = currentInstruction->as<OpSwitchChar>(); |
795 | size_t tableIndex = bytecode.m_tableIndex; |
796 | unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset); |
797 | unsigned scrutinee = bytecode.m_scrutinee.offset(); |
798 | |
799 | // create jump table for switch destinations, track this switch statement. |
800 | SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex); |
801 | m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character)); |
802 | jumpTable->ensureCTITable(); |
803 | |
804 | emitGetVirtualRegister(scrutinee, regT0); |
805 | callOperation(operationSwitchCharWithUnknownKeyType, TrustedImmPtr(m_codeBlock->globalObject()), regT0, tableIndex); |
806 | farJump(returnValueGPR, JSSwitchPtrTag); |
807 | } |
808 | |
809 | void JIT::emit_op_switch_string(const Instruction* currentInstruction) |
810 | { |
811 | auto bytecode = currentInstruction->as<OpSwitchString>(); |
812 | size_t tableIndex = bytecode.m_tableIndex; |
813 | unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.m_defaultOffset); |
814 | unsigned scrutinee = bytecode.m_scrutinee.offset(); |
815 | |
816 | // create jump table for switch destinations, track this switch statement. |
817 | StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex); |
818 | m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset)); |
819 | |
820 | emitGetVirtualRegister(scrutinee, regT0); |
821 | callOperation(operationSwitchStringWithUnknownKeyType, TrustedImmPtr(m_codeBlock->globalObject()), regT0, tableIndex); |
822 | farJump(returnValueGPR, JSSwitchPtrTag); |
823 | } |
824 | |
825 | void JIT::emit_op_debug(const Instruction* currentInstruction) |
826 | { |
827 | auto bytecode = currentInstruction->as<OpDebug>(); |
828 | load32(codeBlock()->debuggerRequestsAddress(), regT0); |
829 | Jump noDebuggerRequests = branchTest32(Zero, regT0); |
830 | callOperation(operationDebug, &vm(), static_cast<int>(bytecode.m_debugHookType)); |
831 | noDebuggerRequests.link(this); |
832 | } |
833 | |
834 | void JIT::emit_op_eq_null(const Instruction* currentInstruction) |
835 | { |
836 | auto bytecode = currentInstruction->as<OpEqNull>(); |
837 | int dst = bytecode.m_dst.offset(); |
838 | int src1 = bytecode.m_operand.offset(); |
839 | |
840 | emitGetVirtualRegister(src1, regT0); |
841 | Jump isImmediate = branchIfNotCell(regT0); |
842 | |
843 | Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); |
844 | move(TrustedImm32(0), regT0); |
845 | Jump wasNotMasqueradesAsUndefined = jump(); |
846 | |
847 | isMasqueradesAsUndefined.link(this); |
848 | emitLoadStructure(vm(), regT0, regT2, regT1); |
849 | move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); |
850 | loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2); |
851 | comparePtr(Equal, regT0, regT2, regT0); |
852 | Jump wasNotImmediate = jump(); |
853 | |
854 | isImmediate.link(this); |
855 | |
856 | and64(TrustedImm32(~JSValue::UndefinedTag), regT0); |
857 | compare64(Equal, regT0, TrustedImm32(JSValue::ValueNull), regT0); |
858 | |
859 | wasNotImmediate.link(this); |
860 | wasNotMasqueradesAsUndefined.link(this); |
861 | |
862 | boxBoolean(regT0, JSValueRegs { regT0 }); |
    emitPutVirtualRegister(dst);
}
866 | |
867 | void JIT::emit_op_neq_null(const Instruction* currentInstruction) |
868 | { |
869 | auto bytecode = currentInstruction->as<OpNeqNull>(); |
870 | int dst = bytecode.m_dst.offset(); |
871 | int src1 = bytecode.m_operand.offset(); |
872 | |
873 | emitGetVirtualRegister(src1, regT0); |
874 | Jump isImmediate = branchIfNotCell(regT0); |
875 | |
876 | Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); |
877 | move(TrustedImm32(1), regT0); |
878 | Jump wasNotMasqueradesAsUndefined = jump(); |
879 | |
880 | isMasqueradesAsUndefined.link(this); |
881 | emitLoadStructure(vm(), regT0, regT2, regT1); |
882 | move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); |
883 | loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2); |
884 | comparePtr(NotEqual, regT0, regT2, regT0); |
885 | Jump wasNotImmediate = jump(); |
886 | |
887 | isImmediate.link(this); |
888 | |
889 | and64(TrustedImm32(~JSValue::UndefinedTag), regT0); |
890 | compare64(NotEqual, regT0, TrustedImm32(JSValue::ValueNull), regT0); |
891 | |
892 | wasNotImmediate.link(this); |
893 | wasNotMasqueradesAsUndefined.link(this); |
894 | |
895 | boxBoolean(regT0, JSValueRegs { regT0 }); |
896 | emitPutVirtualRegister(dst); |
897 | } |
898 | |
899 | void JIT::emit_op_enter(const Instruction*) |
900 | { |
901 | // Even though CTI doesn't use them, we initialize our constant |
902 | // registers to zap stale pointers, to avoid unnecessarily prolonging |
903 | // object lifetime and increasing GC pressure. |
904 | size_t count = m_codeBlock->numVars(); |
905 | for (size_t j = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); j < count; ++j) |
906 | emitInitRegister(virtualRegisterForLocal(j).offset()); |
907 | |
908 | emitWriteBarrier(m_codeBlock); |
909 | |
910 | emitEnterOptimizationCheck(); |
911 | } |
912 | |
913 | void JIT::emit_op_get_scope(const Instruction* currentInstruction) |
914 | { |
915 | auto bytecode = currentInstruction->as<OpGetScope>(); |
916 | int dst = bytecode.m_dst.offset(); |
917 | emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, regT0); |
918 | loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0); |
919 | emitStoreCell(dst, regT0); |
920 | } |
921 | |
922 | void JIT::emit_op_to_this(const Instruction* currentInstruction) |
923 | { |
924 | auto bytecode = currentInstruction->as<OpToThis>(); |
925 | auto& metadata = bytecode.metadata(m_codeBlock); |
926 | StructureID* cachedStructureID = &metadata.m_cachedStructureID; |
927 | emitGetVirtualRegister(bytecode.m_srcDst.offset(), regT1); |
928 | |
929 | emitJumpSlowCaseIfNotJSCell(regT1); |
930 | |
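    // Fast path: |this| must be a final object whose structure matches the cached
    // structure; anything else goes to the slow path.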
931 | addSlowCase(branchIfNotType(regT1, FinalObjectType)); |
932 | load32(cachedStructureID, regT2); |
933 | addSlowCase(branch32(NotEqual, Address(regT1, JSCell::structureIDOffset()), regT2)); |
934 | } |
935 | |
936 | void JIT::emit_op_create_this(const Instruction* currentInstruction) |
937 | { |
938 | auto bytecode = currentInstruction->as<OpCreateThis>(); |
939 | auto& metadata = bytecode.metadata(m_codeBlock); |
940 | int callee = bytecode.m_callee.offset(); |
941 | WriteBarrierBase<JSCell>* cachedFunction = &metadata.m_cachedCallee; |
942 | RegisterID calleeReg = regT0; |
943 | RegisterID rareDataReg = regT4; |
944 | RegisterID resultReg = regT0; |
945 | RegisterID allocatorReg = regT1; |
946 | RegisterID structureReg = regT2; |
947 | RegisterID cachedFunctionReg = regT4; |
948 | RegisterID scratchReg = regT3; |
949 | |
950 | emitGetVirtualRegister(callee, calleeReg); |
951 | addSlowCase(branchIfNotFunction(calleeReg)); |
952 | loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg); |
953 | addSlowCase(branchTestPtr(Zero, rareDataReg)); |
954 | loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfileWithPrototype::offsetOfAllocator()), allocatorReg); |
955 | loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfileWithPrototype::offsetOfStructure()), structureReg); |
956 | |
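    // Use the cached allocation profile only if the callee matches the cached callee,
    // unless the profile has already seen multiple callee objects.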
957 | loadPtr(cachedFunction, cachedFunctionReg); |
958 | Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects())); |
959 | addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg)); |
960 | hasSeenMultipleCallees.link(this); |
961 | |
962 | JumpList slowCases; |
963 | auto butterfly = TrustedImmPtr(nullptr); |
964 | emitAllocateJSObject(resultReg, JITAllocator::variable(), allocatorReg, structureReg, butterfly, scratchReg, slowCases); |
965 | load8(Address(structureReg, Structure::inlineCapacityOffset()), scratchReg); |
966 | emitInitializeInlineStorage(resultReg, scratchReg); |
967 | addSlowCase(slowCases); |
968 | emitPutVirtualRegister(bytecode.m_dst.offset()); |
969 | } |
970 | |
971 | void JIT::emit_op_check_tdz(const Instruction* currentInstruction) |
972 | { |
973 | auto bytecode = currentInstruction->as<OpCheckTdz>(); |
974 | emitGetVirtualRegister(bytecode.m_targetVirtualRegister.offset(), regT0); |
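    // A binding still in its temporal dead zone holds the empty value; the slow path
    // throws the ReferenceError.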
975 | addSlowCase(branchIfEmpty(regT0)); |
976 | } |
977 | |
978 | |
979 | // Slow cases |
980 | |
981 | void JIT::emitSlow_op_eq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
982 | { |
983 | linkAllSlowCases(iter); |
984 | |
985 | auto bytecode = currentInstruction->as<OpEq>(); |
986 | callOperation(operationCompareEq, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1); |
987 | boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR }); |
988 | emitPutVirtualRegister(bytecode.m_dst.offset(), returnValueGPR); |
989 | } |
990 | |
991 | void JIT::emitSlow_op_neq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
992 | { |
993 | linkAllSlowCases(iter); |
994 | |
995 | auto bytecode = currentInstruction->as<OpNeq>(); |
996 | callOperation(operationCompareEq, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1); |
997 | xor32(TrustedImm32(0x1), regT0); |
998 | boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR }); |
999 | emitPutVirtualRegister(bytecode.m_dst.offset(), returnValueGPR); |
1000 | } |
1001 | |
1002 | void JIT::emitSlow_op_jeq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
1003 | { |
1004 | linkAllSlowCases(iter); |
1005 | |
1006 | auto bytecode = currentInstruction->as<OpJeq>(); |
1007 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
1008 | callOperation(operationCompareEq, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1); |
1009 | emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target); |
1010 | } |
1011 | |
1012 | void JIT::emitSlow_op_jneq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
1013 | { |
1014 | linkAllSlowCases(iter); |
1015 | |
1016 | auto bytecode = currentInstruction->as<OpJneq>(); |
1017 | unsigned target = jumpTarget(currentInstruction, bytecode.m_targetLabel); |
1018 | callOperation(operationCompareEq, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1); |
1019 | emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target); |
1020 | } |
1021 | |
1022 | void JIT::emitSlow_op_instanceof_custom(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
1023 | { |
1024 | linkAllSlowCases(iter); |
1025 | |
1026 | auto bytecode = currentInstruction->as<OpInstanceofCustom>(); |
1027 | int dst = bytecode.m_dst.offset(); |
1028 | int value = bytecode.m_value.offset(); |
1029 | int constructor = bytecode.m_constructor.offset(); |
1030 | int hasInstanceValue = bytecode.m_hasInstanceValue.offset(); |
1031 | |
1032 | emitGetVirtualRegister(value, regT0); |
1033 | emitGetVirtualRegister(constructor, regT1); |
1034 | emitGetVirtualRegister(hasInstanceValue, regT2); |
1035 | callOperation(operationInstanceOfCustom, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1, regT2); |
1036 | boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR }); |
1037 | emitPutVirtualRegister(dst, returnValueGPR); |
1038 | } |
1039 | |
1040 | #endif // USE(JSVALUE64) |
1041 | |
1042 | void JIT::emit_op_loop_hint(const Instruction*) |
1043 | { |
1044 | // Emit the JIT optimization check: |
1045 | if (canBeOptimized()) { |
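        // The execution counter counts up from a negative threshold; once it reaches
        // zero the slow path considers tiering up via OSR.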
1046 | addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()), |
1047 | AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()))); |
1048 | } |
1049 | } |
1050 | |
1051 | void JIT::emitSlow_op_loop_hint(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
1052 | { |
1053 | #if ENABLE(DFG_JIT) |
1054 | // Emit the slow path for the JIT optimization check: |
1055 | if (canBeOptimized()) { |
1056 | linkAllSlowCases(iter); |
1057 | |
1058 | copyCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm().topEntryFrame); |
1059 | |
1060 | callOperation(operationOptimize, &vm(), m_bytecodeIndex.asBits()); |
1061 | Jump noOptimizedEntry = branchTestPtr(Zero, returnValueGPR); |
1062 | if (!ASSERT_DISABLED) { |
1063 | Jump ok = branchPtr(MacroAssembler::Above, returnValueGPR, TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000)))); |
1064 | abortWithReason(JITUnreasonableLoopHintJumpTarget); |
1065 | ok.link(this); |
1066 | } |
1067 | farJump(returnValueGPR, GPRInfo::callFrameRegister); |
1068 | noOptimizedEntry.link(this); |
1069 | |
1070 | emitJumpSlowToHot(jump(), currentInstruction->size()); |
1071 | } |
1072 | #else |
1073 | UNUSED_PARAM(currentInstruction); |
1074 | UNUSED_PARAM(iter); |
1075 | #endif |
1076 | } |
1077 | |
1078 | void JIT::emit_op_check_traps(const Instruction*) |
1079 | { |
1080 | addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->needTrapHandlingAddress()))); |
1081 | } |
1082 | |
1083 | void JIT::emit_op_nop(const Instruction*) |
1084 | { |
1085 | } |
1086 | |
1087 | void JIT::emit_op_super_sampler_begin(const Instruction*) |
1088 | { |
1089 | add32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<void*>(&g_superSamplerCount))); |
1090 | } |
1091 | |
1092 | void JIT::emit_op_super_sampler_end(const Instruction*) |
1093 | { |
1094 | sub32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<void*>(&g_superSamplerCount))); |
1095 | } |
1096 | |
1097 | void JIT::emitSlow_op_check_traps(const Instruction*, Vector<SlowCaseEntry>::iterator& iter) |
1098 | { |
1099 | linkAllSlowCases(iter); |
1100 | |
1101 | callOperation(operationHandleTraps, TrustedImmPtr(m_codeBlock->globalObject())); |
1102 | } |
1103 | |
1104 | void JIT::emit_op_new_regexp(const Instruction* currentInstruction) |
1105 | { |
1106 | auto bytecode = currentInstruction->as<OpNewRegexp>(); |
1107 | int dst = bytecode.m_dst.offset(); |
1108 | int regexp = bytecode.m_regexp.offset(); |
1109 | callOperation(operationNewRegexp, TrustedImmPtr(m_codeBlock->globalObject()), jsCast<RegExp*>(m_codeBlock->getConstant(regexp))); |
1110 | emitStoreCell(dst, returnValueGPR); |
1111 | } |
1112 | |
1113 | template<typename Op> |
1114 | void JIT::emitNewFuncCommon(const Instruction* currentInstruction) |
1115 | { |
1116 | Jump lazyJump; |
1117 | auto bytecode = currentInstruction->as<Op>(); |
1118 | int dst = bytecode.m_dst.offset(); |
1119 | |
1120 | #if USE(JSVALUE64) |
1121 | emitGetVirtualRegister(bytecode.m_scope.offset(), regT0); |
1122 | #else |
1123 | emitLoadPayload(bytecode.m_scope.offset(), regT0); |
1124 | #endif |
1125 | FunctionExecutable* funcExec = m_codeBlock->functionDecl(bytecode.m_functionDecl); |
1126 | |
1127 | OpcodeID opcodeID = Op::opcodeID; |
1128 | if (opcodeID == op_new_func) |
1129 | callOperation(operationNewFunction, dst, &vm(), regT0, funcExec); |
1130 | else if (opcodeID == op_new_generator_func) |
1131 | callOperation(operationNewGeneratorFunction, dst, &vm(), regT0, funcExec); |
1132 | else if (opcodeID == op_new_async_func) |
1133 | callOperation(operationNewAsyncFunction, dst, &vm(), regT0, funcExec); |
1134 | else { |
1135 | ASSERT(opcodeID == op_new_async_generator_func); |
1136 | callOperation(operationNewAsyncGeneratorFunction, dst, &vm(), regT0, funcExec); |
1137 | } |
1138 | } |
1139 | |
1140 | void JIT::emit_op_new_func(const Instruction* currentInstruction) |
1141 | { |
1142 | emitNewFuncCommon<OpNewFunc>(currentInstruction); |
1143 | } |
1144 | |
1145 | void JIT::emit_op_new_generator_func(const Instruction* currentInstruction) |
1146 | { |
1147 | emitNewFuncCommon<OpNewGeneratorFunc>(currentInstruction); |
1148 | } |
1149 | |
1150 | void JIT::emit_op_new_async_generator_func(const Instruction* currentInstruction) |
1151 | { |
1152 | emitNewFuncCommon<OpNewAsyncGeneratorFunc>(currentInstruction); |
1153 | } |
1154 | |
1155 | void JIT::emit_op_new_async_func(const Instruction* currentInstruction) |
1156 | { |
1157 | emitNewFuncCommon<OpNewAsyncFunc>(currentInstruction); |
1158 | } |
1159 | |
1160 | template<typename Op> |
1161 | void JIT::emitNewFuncExprCommon(const Instruction* currentInstruction) |
1162 | { |
1163 | auto bytecode = currentInstruction->as<Op>(); |
1164 | int dst = bytecode.m_dst.offset(); |
1165 | #if USE(JSVALUE64) |
1166 | emitGetVirtualRegister(bytecode.m_scope.offset(), regT0); |
1167 | #else |
1168 | emitLoadPayload(bytecode.m_scope.offset(), regT0); |
1169 | #endif |
1170 | |
1171 | FunctionExecutable* function = m_codeBlock->functionExpr(bytecode.m_functionDecl); |
1172 | OpcodeID opcodeID = Op::opcodeID; |
1173 | |
1174 | if (opcodeID == op_new_func_exp) |
1175 | callOperation(operationNewFunction, dst, &vm(), regT0, function); |
1176 | else if (opcodeID == op_new_generator_func_exp) |
1177 | callOperation(operationNewGeneratorFunction, dst, &vm(), regT0, function); |
1178 | else if (opcodeID == op_new_async_func_exp) |
1179 | callOperation(operationNewAsyncFunction, dst, &vm(), regT0, function); |
1180 | else { |
1181 | ASSERT(opcodeID == op_new_async_generator_func_exp); |
1182 | callOperation(operationNewAsyncGeneratorFunction, dst, &vm(), regT0, function); |
1183 | } |
1184 | } |
1185 | |
1186 | void JIT::emit_op_new_func_exp(const Instruction* currentInstruction) |
1187 | { |
1188 | emitNewFuncExprCommon<OpNewFuncExp>(currentInstruction); |
1189 | } |
1190 | |
1191 | void JIT::emit_op_new_generator_func_exp(const Instruction* currentInstruction) |
1192 | { |
1193 | emitNewFuncExprCommon<OpNewGeneratorFuncExp>(currentInstruction); |
1194 | } |
1195 | |
1196 | void JIT::emit_op_new_async_func_exp(const Instruction* currentInstruction) |
1197 | { |
1198 | emitNewFuncExprCommon<OpNewAsyncFuncExp>(currentInstruction); |
1199 | } |
1200 | |
1201 | void JIT::emit_op_new_async_generator_func_exp(const Instruction* currentInstruction) |
1202 | { |
1203 | emitNewFuncExprCommon<OpNewAsyncGeneratorFuncExp>(currentInstruction); |
1204 | } |
1205 | |
1206 | void JIT::emit_op_new_array(const Instruction* currentInstruction) |
1207 | { |
1208 | auto bytecode = currentInstruction->as<OpNewArray>(); |
1209 | auto& metadata = bytecode.metadata(m_codeBlock); |
1210 | int dst = bytecode.m_dst.offset(); |
1211 | int valuesIndex = bytecode.m_argv.offset(); |
1212 | int size = bytecode.m_argc; |
1213 | addPtr(TrustedImm32(valuesIndex * sizeof(Register)), callFrameRegister, regT0); |
1214 | callOperation(operationNewArrayWithProfile, dst, TrustedImmPtr(m_codeBlock->globalObject()), |
1215 | &metadata.m_arrayAllocationProfile, regT0, size); |
1216 | } |
1217 | |
1218 | void JIT::emit_op_new_array_with_size(const Instruction* currentInstruction) |
1219 | { |
1220 | auto bytecode = currentInstruction->as<OpNewArrayWithSize>(); |
1221 | auto& metadata = bytecode.metadata(m_codeBlock); |
1222 | int dst = bytecode.m_dst.offset(); |
1223 | int sizeIndex = bytecode.m_length.offset(); |
1224 | #if USE(JSVALUE64) |
1225 | emitGetVirtualRegister(sizeIndex, regT0); |
1226 | callOperation(operationNewArrayWithSizeAndProfile, dst, TrustedImmPtr(m_codeBlock->globalObject()), |
1227 | &metadata.m_arrayAllocationProfile, regT0); |
1228 | #else |
1229 | emitLoad(sizeIndex, regT1, regT0); |
1230 | callOperation(operationNewArrayWithSizeAndProfile, dst, TrustedImmPtr(m_codeBlock->globalObject()), |
1231 | &metadata.m_arrayAllocationProfile, JSValueRegs(regT1, regT0)); |
1232 | #endif |
1233 | } |
1234 | |
1235 | #if USE(JSVALUE64) |
1236 | void JIT::emit_op_has_structure_property(const Instruction* currentInstruction) |
1237 | { |
1238 | auto bytecode = currentInstruction->as<OpHasStructureProperty>(); |
1239 | int dst = bytecode.m_dst.offset(); |
1240 | int base = bytecode.m_base.offset(); |
1241 | int enumerator = bytecode.m_enumerator.offset(); |
1242 | |
1243 | emitGetVirtualRegister(base, regT0); |
1244 | emitGetVirtualRegister(enumerator, regT1); |
1245 | emitJumpSlowCaseIfNotJSCell(regT0, base); |
1246 | |
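    // The result is true only if the base still has the structure the enumerator was
    // built from; otherwise fall back to the generic slow path.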
1247 | load32(Address(regT0, JSCell::structureIDOffset()), regT0); |
1248 | addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset()))); |
1249 | |
1250 | move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0); |
1251 | emitPutVirtualRegister(dst); |
1252 | } |
1253 | |
1254 | void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) |
1255 | { |
1256 | const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); |
1257 | |
1258 | PatchableJump badType; |
1259 | |
1260 | // FIXME: Add support for other types like TypedArrays and Arguments. |
1261 | // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034. |
1262 | JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType); |
1263 | move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0); |
1264 | Jump done = jump(); |
1265 | |
1266 | LinkBuffer patchBuffer(*this, m_codeBlock); |
1267 | |
1268 | patchBuffer.link(badType, byValInfo->slowPathTarget); |
1269 | patchBuffer.link(slowCases, byValInfo->slowPathTarget); |
1270 | |
1271 | patchBuffer.link(done, byValInfo->badTypeDoneTarget); |
1272 | |
1273 | byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB( |
1274 | m_codeBlock, patchBuffer, JITStubRoutinePtrTag, |
1275 | "Baseline has_indexed_property stub for %s, return point %p" , toCString(*m_codeBlock).data(), returnAddress.value()); |
1276 | |
1277 | MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel<JITStubRoutinePtrTag>(byValInfo->stubRoutine->code().code())); |
1278 | MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(operationHasIndexedPropertyGeneric)); |
1279 | } |
1280 | |
1281 | void JIT::emit_op_has_indexed_property(const Instruction* currentInstruction) |
1282 | { |
1283 | auto bytecode = currentInstruction->as<OpHasIndexedProperty>(); |
1284 | auto& metadata = bytecode.metadata(m_codeBlock); |
1285 | int dst = bytecode.m_dst.offset(); |
1286 | int base = bytecode.m_base.offset(); |
1287 | int property = bytecode.m_property.offset(); |
1288 | ArrayProfile* profile = &metadata.m_arrayProfile; |
1289 | ByValInfo* byValInfo = m_codeBlock->addByValInfo(); |
1290 | |
1291 | emitGetVirtualRegisters(base, regT0, property, regT1); |
1292 | |
1293 | emitJumpSlowCaseIfNotInt(regT1); |
1294 | |
    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it were a uint32 against m_vectorLength - which will always fail if the
    // number was negative, since m_vectorLength is always less than intmax (the total allocation
    // size is always less than 4GB). As such zero-extending will have been correct (and extending the value
    // to 64 bits is necessary since it's used in the address calculation). We zero-extend rather than
    // sign-extend since it makes it easier to re-tag the value in the slow case.
1301 | zeroExtend32ToPtr(regT1, regT1); |
1302 | |
1303 | emitJumpSlowCaseIfNotJSCell(regT0, base); |
1304 | emitArrayProfilingSiteWithCell(regT0, regT2, profile); |
1305 | and32(TrustedImm32(IndexingShapeMask), regT2); |
1306 | |
1307 | JITArrayMode mode = chooseArrayMode(profile); |
1308 | PatchableJump badType; |
1309 | |
1310 | // FIXME: Add support for other types like TypedArrays and Arguments. |
1311 | // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034. |
1312 | JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType); |
1313 | |
1314 | move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0); |
1315 | |
1316 | addSlowCase(badType); |
1317 | addSlowCase(slowCases); |
1318 | |
1319 | Label done = label(); |
1320 | |
1321 | emitPutVirtualRegister(dst); |
1322 | |
1323 | Label nextHotPath = label(); |
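    // Record the patchable bad-type jump, chosen array mode, profile, and the done/next-hot-path
    // labels so privateCompileHasIndexedProperty can link a specialized stub back into this site.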
1324 | |
1325 | m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeIndex, PatchableJump(), badType, mode, profile, done, nextHotPath)); |
1326 | } |
1327 | |
1328 | void JIT::emitSlow_op_has_indexed_property(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
1329 | { |
1330 | linkAllSlowCases(iter); |
1331 | |
1332 | auto bytecode = currentInstruction->as<OpHasIndexedProperty>(); |
1333 | int dst = bytecode.m_dst.offset(); |
1334 | int base = bytecode.m_base.offset(); |
1335 | int property = bytecode.m_property.offset(); |
1336 | ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo; |
1337 | |
1338 | Label slowPath = label(); |
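    // The specialized stub branches here for cases it cannot handle. The label and the call's
    // return address are recorded below so the stub can be linked and the call repatched later.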
1339 | |
1340 | emitGetVirtualRegister(base, regT0); |
1341 | emitGetVirtualRegister(property, regT1); |
1342 | Call call = callOperation(operationHasIndexedPropertyDefault, dst, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1, byValInfo); |
1343 | |
1344 | m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath; |
1345 | m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call; |
1346 | m_byValInstructionIndex++; |
1347 | } |
1348 | |
1349 | void JIT::emit_op_get_direct_pname(const Instruction* currentInstruction) |
1350 | { |
1351 | auto bytecode = currentInstruction->as<OpGetDirectPname>(); |
1352 | int dst = bytecode.m_dst.offset(); |
1353 | int base = bytecode.m_base.offset(); |
1354 | int index = bytecode.m_index.offset(); |
1355 | int enumerator = bytecode.m_enumerator.offset(); |
1356 | |
1357 | // Check that base is a cell |
1358 | emitGetVirtualRegister(base, regT0); |
1359 | emitJumpSlowCaseIfNotJSCell(regT0, base); |
1360 | |
1361 | // Check the structure |
1362 | emitGetVirtualRegister(enumerator, regT2); |
1363 | load32(Address(regT0, JSCell::structureIDOffset()), regT1); |
1364 | addSlowCase(branch32(NotEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedStructureIDOffset()))); |
1365 | |
1366 | // Compute the offset |
1367 | emitGetVirtualRegister(index, regT1); |
    // If the index is less than the enumerator's cached inline capacity, then it's an inline access
1369 | Jump outOfLineAccess = branch32(AboveOrEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset())); |
1370 | addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0); |
1371 | signExtend32ToPtr(regT1, regT1); |
1372 | load64(BaseIndex(regT0, regT1, TimesEight), regT0); |
1373 | |
1374 | Jump done = jump(); |
1375 | |
1376 | // Otherwise it's out of line |
1377 | outOfLineAccess.link(this); |
1378 | loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0); |
1379 | sub32(Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT1); |
1380 | neg32(regT1); |
1381 | signExtend32ToPtr(regT1, regT1); |
1382 | int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue); |
1383 | load64(BaseIndex(regT0, regT1, TimesEight, offsetOfFirstProperty), regT0); |
1384 | |
1385 | done.link(this); |
1386 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
1387 | emitPutVirtualRegister(dst, regT0); |
1388 | } |
1389 | |
1390 | void JIT::emit_op_enumerator_structure_pname(const Instruction* currentInstruction) |
1391 | { |
1392 | auto bytecode = currentInstruction->as<OpEnumeratorStructurePname>(); |
1393 | int dst = bytecode.m_dst.offset(); |
1394 | int enumerator = bytecode.m_enumerator.offset(); |
1395 | int index = bytecode.m_index.offset(); |
1396 | |
1397 | emitGetVirtualRegister(index, regT0); |
1398 | emitGetVirtualRegister(enumerator, regT1); |
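    // Indices below endStructurePropertyIndex name cached structure properties; anything at or
    // beyond that bound produces null.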
1399 | Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset())); |
1400 | |
1401 | move(TrustedImm64(JSValue::encode(jsNull())), regT0); |
1402 | |
1403 | Jump done = jump(); |
1404 | inBounds.link(this); |
1405 | |
1406 | loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1); |
1407 | signExtend32ToPtr(regT0, regT0); |
1408 | load64(BaseIndex(regT1, regT0, TimesEight), regT0); |
1409 | |
1410 | done.link(this); |
1411 | emitPutVirtualRegister(dst); |
1412 | } |
1413 | |
1414 | void JIT::emit_op_enumerator_generic_pname(const Instruction* currentInstruction) |
1415 | { |
1416 | auto bytecode = currentInstruction->as<OpEnumeratorGenericPname>(); |
1417 | int dst = bytecode.m_dst.offset(); |
1418 | int enumerator = bytecode.m_enumerator.offset(); |
1419 | int index = bytecode.m_index.offset(); |
1420 | |
1421 | emitGetVirtualRegister(index, regT0); |
1422 | emitGetVirtualRegister(enumerator, regT1); |
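    // Same shape as above, but bounded by the generic (non-structure) portion of the enumerator's
    // cached property name vector.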
1423 | Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset())); |
1424 | |
1425 | move(TrustedImm64(JSValue::encode(jsNull())), regT0); |
1426 | |
1427 | Jump done = jump(); |
1428 | inBounds.link(this); |
1429 | |
1430 | loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1); |
1431 | signExtend32ToPtr(regT0, regT0); |
1432 | load64(BaseIndex(regT1, regT0, TimesEight), regT0); |
1433 | |
1434 | done.link(this); |
1435 | emitPutVirtualRegister(dst); |
1436 | } |
1437 | |
1438 | void JIT::emit_op_profile_type(const Instruction* currentInstruction) |
1439 | { |
1440 | auto bytecode = currentInstruction->as<OpProfileType>(); |
1441 | auto& metadata = bytecode.metadata(m_codeBlock); |
1442 | TypeLocation* cachedTypeLocation = metadata.m_typeLocation; |
1443 | int valueToProfile = bytecode.m_targetVirtualRegister.offset(); |
1444 | |
1445 | emitGetVirtualRegister(valueToProfile, regT0); |
1446 | |
1447 | JumpList jumpToEnd; |
1448 | |
1449 | jumpToEnd.append(branchIfEmpty(regT0)); |
1450 | |
1451 | // Compile in a predictive type check, if possible, to see if we can skip writing to the log. |
1452 | // These typechecks are inlined to match those of the 64-bit JSValue type checks. |
1453 | if (cachedTypeLocation->m_lastSeenType == TypeUndefined) |
1454 | jumpToEnd.append(branchIfUndefined(regT0)); |
1455 | else if (cachedTypeLocation->m_lastSeenType == TypeNull) |
1456 | jumpToEnd.append(branchIfNull(regT0)); |
1457 | else if (cachedTypeLocation->m_lastSeenType == TypeBoolean) |
1458 | jumpToEnd.append(branchIfBoolean(regT0, regT1)); |
1459 | else if (cachedTypeLocation->m_lastSeenType == TypeAnyInt) |
1460 | jumpToEnd.append(branchIfInt32(regT0)); |
1461 | else if (cachedTypeLocation->m_lastSeenType == TypeNumber) |
1462 | jumpToEnd.append(branchIfNumber(regT0)); |
1463 | else if (cachedTypeLocation->m_lastSeenType == TypeString) { |
1464 | Jump isNotCell = branchIfNotCell(regT0); |
1465 | jumpToEnd.append(branchIfString(regT0)); |
1466 | isNotCell.link(this); |
1467 | } |
1468 | |
1469 | // Load the type profiling log into T2. |
1470 | TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog(); |
1471 | move(TrustedImmPtr(cachedTypeProfilerLog), regT2); |
1472 | // Load the next log entry into T1. |
1473 | loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1); |
1474 | |
1475 | // Store the JSValue onto the log entry. |
1476 | store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset())); |
1477 | |
1478 | // Store the structureID of the cell if T0 is a cell, otherwise, store 0 on the log entry. |
1479 | Jump notCell = branchIfNotCell(regT0); |
1480 | load32(Address(regT0, JSCell::structureIDOffset()), regT0); |
1481 | store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset())); |
1482 | Jump skipIsCell = jump(); |
1483 | notCell.link(this); |
1484 | store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset())); |
1485 | skipIsCell.link(this); |
1486 | |
1487 | // Store the typeLocation on the log entry. |
1488 | move(TrustedImmPtr(cachedTypeLocation), regT0); |
1489 | store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset())); |
1490 | |
1491 | // Increment the current log entry. |
1492 | addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1); |
1493 | store64(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset())); |
1494 | Jump skipClearLog = branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr())); |
1495 | // Clear the log if we're at the end of the log. |
1496 | callOperation(operationProcessTypeProfilerLog, &vm()); |
1497 | skipClearLog.link(this); |
1498 | |
1499 | jumpToEnd.link(this); |
1500 | } |
1501 | |
1502 | void JIT::emit_op_log_shadow_chicken_prologue(const Instruction* currentInstruction) |
1503 | { |
1504 | RELEASE_ASSERT(vm().shadowChicken()); |
1505 | updateTopCallFrame(); |
    static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "nonArgGPR0 must not alias regT0 or regT2.");
1507 | auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>(); |
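    // Reserve a packet in the shadow chicken log and record this prologue (frame and scope) so the
    // debugger can reconstruct frames that tail calls would otherwise have destroyed.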
1508 | GPRReg shadowPacketReg = regT0; |
1509 | GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register. |
1510 | GPRReg scratch2Reg = regT2; |
1511 | ensureShadowChickenPacket(vm(), shadowPacketReg, scratch1Reg, scratch2Reg); |
1512 | emitGetVirtualRegister(bytecode.m_scope.offset(), regT3); |
1513 | logShadowChickenProloguePacket(shadowPacketReg, scratch1Reg, regT3); |
1514 | } |
1515 | |
1516 | void JIT::emit_op_log_shadow_chicken_tail(const Instruction* currentInstruction) |
1517 | { |
1518 | RELEASE_ASSERT(vm().shadowChicken()); |
1519 | updateTopCallFrame(); |
    static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "nonArgGPR0 must not alias regT0 or regT2.");
1521 | auto bytecode = currentInstruction->as<OpLogShadowChickenTail>(); |
1522 | GPRReg shadowPacketReg = regT0; |
1523 | GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register. |
1524 | GPRReg scratch2Reg = regT2; |
1525 | ensureShadowChickenPacket(vm(), shadowPacketReg, scratch1Reg, scratch2Reg); |
1526 | emitGetVirtualRegister(bytecode.m_thisValue.offset(), regT2); |
1527 | emitGetVirtualRegister(bytecode.m_scope.offset(), regT3); |
1528 | logShadowChickenTailPacket(shadowPacketReg, JSValueRegs(regT2), regT3, m_codeBlock, CallSiteIndex(m_bytecodeIndex)); |
1529 | } |
1530 | |
1531 | #endif // USE(JSVALUE64) |
1532 | |
1533 | void JIT::emit_op_profile_control_flow(const Instruction* currentInstruction) |
1534 | { |
1535 | auto bytecode = currentInstruction->as<OpProfileControlFlow>(); |
1536 | auto& metadata = bytecode.metadata(m_codeBlock); |
1537 | BasicBlockLocation* basicBlockLocation = metadata.m_basicBlockLocation; |
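    // Mark this basic block as having executed, for the control-flow profiler.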
1538 | #if USE(JSVALUE64) |
1539 | basicBlockLocation->emitExecuteCode(*this); |
1540 | #else |
1541 | basicBlockLocation->emitExecuteCode(*this, regT0); |
1542 | #endif |
1543 | } |
1544 | |
1545 | void JIT::emit_op_argument_count(const Instruction* currentInstruction) |
1546 | { |
1547 | auto bytecode = currentInstruction->as<OpArgumentCount>(); |
1548 | int dst = bytecode.m_dst.offset(); |
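    // The argument count stored in the call frame includes |this|, so subtract one.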
1549 | load32(payloadFor(CallFrameSlot::argumentCount), regT0); |
1550 | sub32(TrustedImm32(1), regT0); |
1551 | JSValueRegs result = JSValueRegs::withTwoAvailableRegs(regT0, regT1); |
1552 | boxInt32(regT0, result); |
1553 | emitPutVirtualRegister(dst, result); |
1554 | } |
1555 | |
1556 | void JIT::emit_op_get_rest_length(const Instruction* currentInstruction) |
1557 | { |
1558 | auto bytecode = currentInstruction->as<OpGetRestLength>(); |
1559 | int dst = bytecode.m_dst.offset(); |
1560 | unsigned numParamsToSkip = bytecode.m_numParametersToSkip; |
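    // restLength = max(argumentCount - numParamsToSkip, 0), where argumentCount excludes |this|.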
1561 | load32(payloadFor(CallFrameSlot::argumentCount), regT0); |
1562 | sub32(TrustedImm32(1), regT0); |
1563 | Jump zeroLength = branch32(LessThanOrEqual, regT0, Imm32(numParamsToSkip)); |
1564 | sub32(Imm32(numParamsToSkip), regT0); |
1565 | #if USE(JSVALUE64) |
1566 | boxInt32(regT0, JSValueRegs(regT0)); |
1567 | #endif |
1568 | Jump done = jump(); |
1569 | |
1570 | zeroLength.link(this); |
1571 | #if USE(JSVALUE64) |
1572 | move(TrustedImm64(JSValue::encode(jsNumber(0))), regT0); |
1573 | #else |
1574 | move(TrustedImm32(0), regT0); |
1575 | #endif |
1576 | |
1577 | done.link(this); |
1578 | #if USE(JSVALUE64) |
1579 | emitPutVirtualRegister(dst, regT0); |
1580 | #else |
1581 | move(TrustedImm32(JSValue::Int32Tag), regT1); |
1582 | emitPutVirtualRegister(dst, JSValueRegs(regT1, regT0)); |
1583 | #endif |
1584 | } |
1585 | |
1586 | void JIT::emit_op_get_argument(const Instruction* currentInstruction) |
1587 | { |
1588 | auto bytecode = currentInstruction->as<OpGetArgument>(); |
1589 | int dst = bytecode.m_dst.offset(); |
1590 | int index = bytecode.m_index; |
1591 | #if USE(JSVALUE64) |
1592 | JSValueRegs resultRegs(regT0); |
1593 | #else |
1594 | JSValueRegs resultRegs(regT1, regT0); |
1595 | #endif |
1596 | |
1597 | load32(payloadFor(CallFrameSlot::argumentCount), regT2); |
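    // If the requested index is not below the frame's argument count (which includes |this|),
    // the argument was not passed and the result is undefined.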
1598 | Jump argumentOutOfBounds = branch32(LessThanOrEqual, regT2, TrustedImm32(index)); |
1599 | loadValue(addressFor(CallFrameSlot::thisArgument + index), resultRegs); |
1600 | Jump done = jump(); |
1601 | |
1602 | argumentOutOfBounds.link(this); |
1603 | moveValue(jsUndefined(), resultRegs); |
1604 | |
1605 | done.link(this); |
1606 | emitValueProfilingSite(bytecode.metadata(m_codeBlock)); |
1607 | emitPutVirtualRegister(dst, resultRegs); |
1608 | } |
1609 | |
1610 | } // namespace JSC |
1611 | |
1612 | #endif // ENABLE(JIT) |
1613 | |