1 | /* |
2 | * Copyright (C) 2011-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #pragma once |
27 | |
28 | #if ENABLE(JIT) |
29 | |
30 | #include "CodeBlock.h" |
31 | #include "EntryFrame.h" |
32 | #include "FPRInfo.h" |
33 | #include "GPRInfo.h" |
34 | #include "Heap.h" |
35 | #include "InlineCallFrame.h" |
36 | #include "JITAllocator.h" |
37 | #include "JITCode.h" |
38 | #include "JSCellInlines.h" |
39 | #include "MacroAssembler.h" |
40 | #include "MarkedSpace.h" |
41 | #include "RegisterAtOffsetList.h" |
42 | #include "RegisterSet.h" |
43 | #include "StackAlignment.h" |
44 | #include "TagRegistersMode.h" |
45 | #include "TypeofType.h" |
46 | #include "VM.h" |
47 | |
48 | namespace JSC { |
49 | |
50 | typedef void (*V_DebugOperation_EPP)(CallFrame*, void*, void*); |
51 | |
52 | class AssemblyHelpers : public MacroAssembler { |
53 | public: |
54 | AssemblyHelpers(CodeBlock* codeBlock) |
55 | : m_codeBlock(codeBlock) |
56 | , m_baselineCodeBlock(codeBlock ? codeBlock->baselineAlternative() : 0) |
57 | { |
58 | if (m_codeBlock) { |
59 | ASSERT(m_baselineCodeBlock); |
60 | ASSERT(!m_baselineCodeBlock->alternative()); |
61 | ASSERT(m_baselineCodeBlock->jitType() == JITType::None || JITCode::isBaselineCode(m_baselineCodeBlock->jitType())); |
62 | } |
63 | } |
64 | |
65 | CodeBlock* codeBlock() { return m_codeBlock; } |
66 | VM& vm() { return m_codeBlock->vm(); } |
67 | AssemblerType_T& assembler() { return m_assembler; } |
68 | |
69 | void prepareCallOperation(VM& vm) |
70 | { |
71 | UNUSED_PARAM(vm); |
72 | #if !USE(BUILTIN_FRAME_ADDRESS) || !ASSERT_DISABLED |
73 | storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame); |
74 | #endif |
75 | } |
76 | |
77 | void checkStackPointerAlignment() |
78 | { |
79 | // This check is both unneeded and harder to write correctly for ARM64 |
80 | #if !defined(NDEBUG) && !CPU(ARM64) |
81 | Jump stackPointerAligned = branchTestPtr(Zero, stackPointerRegister, TrustedImm32(0xf)); |
82 | abortWithReason(AHStackPointerMisaligned); |
83 | stackPointerAligned.link(this); |
84 | #endif |
85 | } |
86 | |
87 | #if USE(JSVALUE64) |
88 | void store64FromReg(Reg src, Address dst) |
89 | { |
90 | if (src.isFPR()) |
91 | storeDouble(src.fpr(), dst); |
92 | else |
93 | store64(src.gpr(), dst); |
94 | } |
95 | #endif |
96 | |
97 | void store32FromReg(Reg src, Address dst) |
98 | { |
99 | if (src.isFPR()) |
100 | storeFloat(src.fpr(), dst); |
101 | else |
102 | store32(src.gpr(), dst); |
103 | } |
104 | |
105 | #if USE(JSVALUE64) |
106 | void load64ToReg(Address src, Reg dst) |
107 | { |
108 | if (dst.isFPR()) |
109 | loadDouble(src, dst.fpr()); |
110 | else |
111 | load64(src, dst.gpr()); |
112 | } |
113 | #endif |
114 | |
115 | void load32ToReg(Address src, Reg dst) |
116 | { |
117 | if (dst.isFPR()) |
118 | loadFloat(src, dst.fpr()); |
119 | else |
120 | load32(src, dst.gpr()); |
121 | } |
122 | |
123 | template<typename T> |
124 | void storeCell(T cell, Address address) |
125 | { |
126 | #if USE(JSVALUE64) |
127 | store64(cell, address); |
128 | #else |
129 | store32(cell, address.withOffset(PayloadOffset)); |
130 | store32(TrustedImm32(JSValue::CellTag), address.withOffset(TagOffset)); |
131 | #endif |
132 | } |
133 | |
134 | void loadCell(Address address, GPRReg gpr) |
135 | { |
136 | #if USE(JSVALUE64) |
137 | load64(address, gpr); |
138 | #else |
139 | load32(address.withOffset(PayloadOffset), gpr); |
140 | #endif |
141 | } |
142 | |
143 | void storeValue(JSValueRegs regs, Address address) |
144 | { |
145 | #if USE(JSVALUE64) |
146 | store64(regs.gpr(), address); |
147 | #else |
148 | store32(regs.payloadGPR(), address.withOffset(PayloadOffset)); |
149 | store32(regs.tagGPR(), address.withOffset(TagOffset)); |
150 | #endif |
151 | } |
152 | |
153 | void storeValue(JSValueRegs regs, BaseIndex address) |
154 | { |
155 | #if USE(JSVALUE64) |
156 | store64(regs.gpr(), address); |
157 | #else |
158 | store32(regs.payloadGPR(), address.withOffset(PayloadOffset)); |
159 | store32(regs.tagGPR(), address.withOffset(TagOffset)); |
160 | #endif |
161 | } |
162 | |
163 | void storeValue(JSValueRegs regs, void* address) |
164 | { |
165 | #if USE(JSVALUE64) |
166 | store64(regs.gpr(), address); |
167 | #else |
168 | store32(regs.payloadGPR(), bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + PayloadOffset)); |
169 | store32(regs.tagGPR(), bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + TagOffset)); |
170 | #endif |
171 | } |
172 | |
173 | void loadValue(Address address, JSValueRegs regs) |
174 | { |
175 | #if USE(JSVALUE64) |
176 | load64(address, regs.gpr()); |
177 | #else |
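        // When the base register aliases the payload register, load the tag first so the
        // base pointer is still intact for the second load.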
178 | if (address.base == regs.payloadGPR()) { |
179 | load32(address.withOffset(TagOffset), regs.tagGPR()); |
180 | load32(address.withOffset(PayloadOffset), regs.payloadGPR()); |
181 | } else { |
182 | load32(address.withOffset(PayloadOffset), regs.payloadGPR()); |
183 | load32(address.withOffset(TagOffset), regs.tagGPR()); |
184 | } |
185 | #endif |
186 | } |
187 | |
188 | void loadValue(BaseIndex address, JSValueRegs regs) |
189 | { |
190 | #if USE(JSVALUE64) |
191 | load64(address, regs.gpr()); |
192 | #else |
193 | if (address.base == regs.payloadGPR() || address.index == regs.payloadGPR()) { |
194 | // We actually could handle the case where the registers are aliased to both |
195 | // tag and payload, but we don't for now. |
196 | RELEASE_ASSERT(address.base != regs.tagGPR()); |
197 | RELEASE_ASSERT(address.index != regs.tagGPR()); |
198 | |
199 | load32(address.withOffset(TagOffset), regs.tagGPR()); |
200 | load32(address.withOffset(PayloadOffset), regs.payloadGPR()); |
201 | } else { |
202 | load32(address.withOffset(PayloadOffset), regs.payloadGPR()); |
203 | load32(address.withOffset(TagOffset), regs.tagGPR()); |
204 | } |
205 | #endif |
206 | } |
207 | |
208 | void loadValue(void* address, JSValueRegs regs) |
209 | { |
210 | #if USE(JSVALUE64) |
211 | load64(address, regs.gpr()); |
212 | #else |
213 | move(TrustedImmPtr(address), regs.payloadGPR()); |
214 | loadValue(Address(regs.payloadGPR()), regs); |
215 | #endif |
216 | } |
217 | |
218 | // Note that this clobbers offset. |
219 | void loadProperty(GPRReg object, GPRReg offset, JSValueRegs result); |
220 | |
221 | void moveValueRegs(JSValueRegs srcRegs, JSValueRegs destRegs) |
222 | { |
223 | #if USE(JSVALUE32_64) |
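        // The source and destination pairs may overlap; order the moves (or swap) so that
        // no source register is clobbered before it has been read.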
224 | if (destRegs.tagGPR() == srcRegs.payloadGPR()) { |
225 | if (destRegs.payloadGPR() == srcRegs.tagGPR()) { |
226 | swap(srcRegs.payloadGPR(), srcRegs.tagGPR()); |
227 | return; |
228 | } |
229 | move(srcRegs.payloadGPR(), destRegs.payloadGPR()); |
230 | move(srcRegs.tagGPR(), destRegs.tagGPR()); |
231 | return; |
232 | } |
233 | move(srcRegs.tagGPR(), destRegs.tagGPR()); |
234 | move(srcRegs.payloadGPR(), destRegs.payloadGPR()); |
235 | #else |
236 | move(srcRegs.gpr(), destRegs.gpr()); |
237 | #endif |
238 | } |
239 | |
240 | void moveValue(JSValue value, JSValueRegs regs) |
241 | { |
242 | #if USE(JSVALUE64) |
243 | move(Imm64(JSValue::encode(value)), regs.gpr()); |
244 | #else |
245 | move(Imm32(value.tag()), regs.tagGPR()); |
246 | move(Imm32(value.payload()), regs.payloadGPR()); |
247 | #endif |
248 | } |
249 | |
250 | void moveTrustedValue(JSValue value, JSValueRegs regs) |
251 | { |
252 | #if USE(JSVALUE64) |
253 | move(TrustedImm64(JSValue::encode(value)), regs.gpr()); |
254 | #else |
255 | move(TrustedImm32(value.tag()), regs.tagGPR()); |
256 | move(TrustedImm32(value.payload()), regs.payloadGPR()); |
257 | #endif |
258 | } |
259 | |
260 | void storeTrustedValue(JSValue value, Address address) |
261 | { |
262 | #if USE(JSVALUE64) |
263 | store64(TrustedImm64(JSValue::encode(value)), address); |
264 | #else |
265 | store32(TrustedImm32(value.tag()), address.withOffset(TagOffset)); |
266 | store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset)); |
267 | #endif |
268 | } |
269 | |
270 | void storeTrustedValue(JSValue value, BaseIndex address) |
271 | { |
272 | #if USE(JSVALUE64) |
273 | store64(TrustedImm64(JSValue::encode(value)), address); |
274 | #else |
275 | store32(TrustedImm32(value.tag()), address.withOffset(TagOffset)); |
276 | store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset)); |
277 | #endif |
278 | } |
279 | |
280 | Address addressFor(const RegisterAtOffset& entry) |
281 | { |
282 | return Address(GPRInfo::callFrameRegister, entry.offset()); |
283 | } |
284 | |
285 | void emitSave(const RegisterAtOffsetList& list) |
286 | { |
287 | for (const RegisterAtOffset& entry : list) { |
288 | if (entry.reg().isGPR()) |
289 | storePtr(entry.reg().gpr(), addressFor(entry)); |
290 | else |
291 | storeDouble(entry.reg().fpr(), addressFor(entry)); |
292 | } |
293 | } |
294 | |
295 | void emitRestore(const RegisterAtOffsetList& list) |
296 | { |
297 | for (const RegisterAtOffset& entry : list) { |
298 | if (entry.reg().isGPR()) |
299 | loadPtr(addressFor(entry), entry.reg().gpr()); |
300 | else |
301 | loadDouble(addressFor(entry), entry.reg().fpr()); |
302 | } |
303 | } |
304 | |
305 | void emitSaveCalleeSavesFor(CodeBlock* codeBlock) |
306 | { |
307 | ASSERT(codeBlock); |
308 | |
309 | const RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters(); |
310 | RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs()); |
311 | unsigned registerCount = calleeSaves->size(); |
312 | |
313 | for (unsigned i = 0; i < registerCount; i++) { |
314 | RegisterAtOffset entry = calleeSaves->at(i); |
315 | if (dontSaveRegisters.get(entry.reg())) |
316 | continue; |
317 | storePtr(entry.reg().gpr(), Address(framePointerRegister, entry.offset())); |
318 | } |
319 | } |
320 | |
321 | enum RestoreTagRegisterMode { UseExistingTagRegisterContents, CopyBaselineCalleeSavedRegistersFromBaseFrame }; |
322 | |
323 | void emitSaveOrCopyCalleeSavesFor(CodeBlock* codeBlock, VirtualRegister offsetVirtualRegister, RestoreTagRegisterMode tagRegisterMode, GPRReg temp) |
324 | { |
325 | ASSERT(codeBlock); |
326 | |
327 | const RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters(); |
328 | RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs()); |
329 | unsigned registerCount = calleeSaves->size(); |
330 | |
331 | #if USE(JSVALUE64) |
332 | RegisterSet baselineCalleeSaves = RegisterSet::llintBaselineCalleeSaveRegisters(); |
333 | #endif |
334 | |
335 | for (unsigned i = 0; i < registerCount; i++) { |
336 | RegisterAtOffset entry = calleeSaves->at(i); |
337 | if (dontSaveRegisters.get(entry.reg())) |
338 | continue; |
339 | |
340 | GPRReg registerToWrite; |
341 | |
342 | #if USE(JSVALUE32_64) |
343 | UNUSED_PARAM(tagRegisterMode); |
344 | UNUSED_PARAM(temp); |
345 | #else |
346 | if (tagRegisterMode == CopyBaselineCalleeSavedRegistersFromBaseFrame && baselineCalleeSaves.get(entry.reg())) { |
347 | registerToWrite = temp; |
348 | loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, entry.offset()), registerToWrite); |
349 | } else |
350 | #endif |
351 | registerToWrite = entry.reg().gpr(); |
352 | |
353 | storePtr(registerToWrite, Address(framePointerRegister, offsetVirtualRegister.offsetInBytes() + entry.offset())); |
354 | } |
355 | } |
356 | |
357 | void emitRestoreCalleeSavesFor(CodeBlock* codeBlock) |
358 | { |
359 | ASSERT(codeBlock); |
360 | |
361 | const RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters(); |
362 | RegisterSet dontRestoreRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs()); |
363 | unsigned registerCount = calleeSaves->size(); |
364 | |
365 | for (unsigned i = 0; i < registerCount; i++) { |
366 | RegisterAtOffset entry = calleeSaves->at(i); |
367 | if (dontRestoreRegisters.get(entry.reg())) |
368 | continue; |
369 | loadPtr(Address(framePointerRegister, entry.offset()), entry.reg().gpr()); |
370 | } |
371 | } |
372 | |
373 | void emitSaveCalleeSaves() |
374 | { |
375 | emitSaveCalleeSavesFor(codeBlock()); |
376 | } |
377 | |
378 | void emitSaveThenMaterializeTagRegisters() |
379 | { |
380 | #if USE(JSVALUE64) |
381 | #if CPU(ARM64) |
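        // ARM64 requires the stack pointer to stay 16-byte aligned, so the two tag
        // registers are pushed (and later popped) as a pair.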
382 | pushPair(GPRInfo::numberTagRegister, GPRInfo::notCellMaskRegister); |
383 | #else |
384 | push(GPRInfo::numberTagRegister); |
385 | push(GPRInfo::notCellMaskRegister); |
386 | #endif |
387 | emitMaterializeTagCheckRegisters(); |
388 | #endif |
389 | } |
390 | |
391 | void emitRestoreCalleeSaves() |
392 | { |
393 | emitRestoreCalleeSavesFor(codeBlock()); |
394 | } |
395 | |
396 | void emitRestoreSavedTagRegisters() |
397 | { |
398 | #if USE(JSVALUE64) |
399 | #if CPU(ARM64) |
400 | popPair(GPRInfo::numberTagRegister, GPRInfo::notCellMaskRegister); |
401 | #else |
402 | pop(GPRInfo::notCellMaskRegister); |
403 | pop(GPRInfo::numberTagRegister); |
404 | #endif |
405 | #endif |
406 | } |
407 | |
408 | // If you use this, be aware that vmGPR will get trashed. |
409 | void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(GPRReg vmGPR) |
410 | { |
411 | #if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0 |
412 | loadPtr(Address(vmGPR, VM::topEntryFrameOffset()), vmGPR); |
413 | copyCalleeSavesToEntryFrameCalleeSavesBufferImpl(vmGPR); |
414 | #else |
415 | UNUSED_PARAM(vmGPR); |
416 | #endif |
417 | } |
418 | |
419 | void copyCalleeSavesToEntryFrameCalleeSavesBuffer(EntryFrame*& topEntryFrame) |
420 | { |
421 | #if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0 |
422 | const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() }; |
423 | GPRReg temp1 = usedRegisters.getFreeGPR(0); |
424 | loadPtr(&topEntryFrame, temp1); |
425 | copyCalleeSavesToEntryFrameCalleeSavesBufferImpl(temp1); |
426 | #else |
427 | UNUSED_PARAM(topEntryFrame); |
428 | #endif |
429 | } |
430 | |
431 | void copyCalleeSavesToEntryFrameCalleeSavesBuffer(GPRReg topEntryFrame) |
432 | { |
433 | #if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0 |
434 | copyCalleeSavesToEntryFrameCalleeSavesBufferImpl(topEntryFrame); |
435 | #else |
436 | UNUSED_PARAM(topEntryFrame); |
437 | #endif |
438 | } |
439 | |
440 | void restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(EntryFrame*&); |
441 | |
442 | void copyCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(EntryFrame*& topEntryFrame, const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() }) |
443 | { |
444 | #if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0 |
445 | GPRReg temp1 = usedRegisters.getFreeGPR(0); |
446 | GPRReg temp2 = usedRegisters.getFreeGPR(1); |
447 | FPRReg fpTemp = usedRegisters.getFreeFPR(); |
448 | ASSERT(temp2 != InvalidGPRReg); |
449 | |
450 | ASSERT(codeBlock()); |
451 | |
        // Copy callee saves that were spilled to the stack, or that are still live in registers, into the VM's callee-save buffer.
453 | loadPtr(&topEntryFrame, temp1); |
454 | addPtr(TrustedImm32(EntryFrame::calleeSaveRegistersBufferOffset()), temp1); |
455 | |
456 | RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets(); |
457 | const RegisterAtOffsetList* currentCalleeSaves = codeBlock()->calleeSaveRegisters(); |
458 | RegisterSet dontCopyRegisters = RegisterSet::stackRegisters(); |
459 | unsigned registerCount = allCalleeSaves->size(); |
460 | |
461 | for (unsigned i = 0; i < registerCount; i++) { |
462 | RegisterAtOffset entry = allCalleeSaves->at(i); |
463 | if (dontCopyRegisters.get(entry.reg())) |
464 | continue; |
465 | RegisterAtOffset* currentFrameEntry = currentCalleeSaves->find(entry.reg()); |
466 | |
467 | if (entry.reg().isGPR()) { |
468 | GPRReg regToStore; |
469 | if (currentFrameEntry) { |
470 | // Load calleeSave from stack into temp register |
471 | regToStore = temp2; |
472 | loadPtr(Address(framePointerRegister, currentFrameEntry->offset()), regToStore); |
473 | } else |
474 | // Just store callee save directly |
475 | regToStore = entry.reg().gpr(); |
476 | |
477 | storePtr(regToStore, Address(temp1, entry.offset())); |
478 | } else { |
479 | FPRReg fpRegToStore; |
480 | if (currentFrameEntry) { |
481 | // Load calleeSave from stack into temp register |
482 | fpRegToStore = fpTemp; |
483 | loadDouble(Address(framePointerRegister, currentFrameEntry->offset()), fpRegToStore); |
484 | } else |
485 | // Just store callee save directly |
486 | fpRegToStore = entry.reg().fpr(); |
487 | |
488 | storeDouble(fpRegToStore, Address(temp1, entry.offset())); |
489 | } |
490 | } |
491 | #else |
492 | UNUSED_PARAM(topEntryFrame); |
493 | UNUSED_PARAM(usedRegisters); |
494 | #endif |
495 | } |
496 | |
497 | void emitMaterializeTagCheckRegisters() |
498 | { |
499 | #if USE(JSVALUE64) |
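        // numberTagRegister holds JSValue::NumberTag; notCellMaskRegister holds
        // NumberTag | OtherTag, i.e. JSValue::NotCellMask.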
500 | move(MacroAssembler::TrustedImm64(JSValue::NumberTag), GPRInfo::numberTagRegister); |
501 | orPtr(MacroAssembler::TrustedImm32(JSValue::OtherTag), GPRInfo::numberTagRegister, GPRInfo::notCellMaskRegister); |
502 | #endif |
503 | } |
504 | |
505 | void clearStackFrame(GPRReg currentTop, GPRReg newTop, GPRReg temp, unsigned frameSize) |
506 | { |
507 | ASSERT(frameSize % stackAlignmentBytes() == 0); |
508 | if (frameSize <= 128) { |
509 | for (unsigned offset = 0; offset < frameSize; offset += sizeof(CPURegister)) |
510 | storePtr(TrustedImm32(0), Address(currentTop, -8 - offset)); |
511 | } else { |
512 | constexpr unsigned storeBytesPerIteration = stackAlignmentBytes(); |
513 | constexpr unsigned storesPerIteration = storeBytesPerIteration / sizeof(CPURegister); |
514 | |
515 | move(currentTop, temp); |
516 | Label zeroLoop = label(); |
517 | subPtr(TrustedImm32(storeBytesPerIteration), temp); |
518 | #if CPU(ARM64) |
        static_assert(storesPerIteration == 2, "clearStackFrame() for ARM64 assumes stack is 16 byte aligned");
520 | storePair64(ARM64Registers::zr, ARM64Registers::zr, temp); |
521 | #else |
522 | for (unsigned i = storesPerIteration; i-- != 0;) |
523 | storePtr(TrustedImm32(0), Address(temp, sizeof(CPURegister) * i)); |
524 | #endif |
525 | branchPtr(NotEqual, temp, newTop).linkTo(zeroLoop, this); |
526 | } |
527 | } |
528 | |
529 | #if CPU(X86_64) |
530 | static constexpr size_t prologueStackPointerDelta() |
531 | { |
532 | // Prologue only saves the framePointerRegister |
533 | return sizeof(void*); |
534 | } |
535 | |
536 | void emitFunctionPrologue() |
537 | { |
538 | push(framePointerRegister); |
539 | move(stackPointerRegister, framePointerRegister); |
540 | } |
541 | |
542 | void emitFunctionEpilogueWithEmptyFrame() |
543 | { |
544 | pop(framePointerRegister); |
545 | } |
546 | |
547 | void emitFunctionEpilogue() |
548 | { |
549 | move(framePointerRegister, stackPointerRegister); |
550 | pop(framePointerRegister); |
551 | } |
552 | |
553 | void preserveReturnAddressAfterCall(GPRReg reg) |
554 | { |
555 | pop(reg); |
556 | } |
557 | |
558 | void restoreReturnAddressBeforeReturn(GPRReg reg) |
559 | { |
560 | push(reg); |
561 | } |
562 | |
563 | void restoreReturnAddressBeforeReturn(Address address) |
564 | { |
565 | push(address); |
566 | } |
567 | #endif // CPU(X86_64) |
568 | |
569 | #if CPU(ARM_THUMB2) || CPU(ARM64) |
570 | static constexpr size_t prologueStackPointerDelta() |
571 | { |
572 | // Prologue saves the framePointerRegister and linkRegister |
573 | return 2 * sizeof(void*); |
574 | } |
575 | |
576 | void emitFunctionPrologue() |
577 | { |
578 | tagReturnAddress(); |
579 | pushPair(framePointerRegister, linkRegister); |
580 | move(stackPointerRegister, framePointerRegister); |
581 | } |
582 | |
583 | void emitFunctionEpilogueWithEmptyFrame() |
584 | { |
585 | popPair(framePointerRegister, linkRegister); |
586 | } |
587 | |
588 | void emitFunctionEpilogue() |
589 | { |
590 | move(framePointerRegister, stackPointerRegister); |
591 | emitFunctionEpilogueWithEmptyFrame(); |
592 | } |
593 | |
594 | ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg) |
595 | { |
596 | move(linkRegister, reg); |
597 | } |
598 | |
599 | ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg) |
600 | { |
601 | move(reg, linkRegister); |
602 | } |
603 | |
604 | ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address) |
605 | { |
606 | loadPtr(address, linkRegister); |
607 | } |
608 | #endif |
609 | |
610 | #if CPU(MIPS) |
611 | static constexpr size_t prologueStackPointerDelta() |
612 | { |
613 | // Prologue saves the framePointerRegister and returnAddressRegister |
614 | return 2 * sizeof(void*); |
615 | } |
616 | |
617 | void emitFunctionPrologue() |
618 | { |
619 | pushPair(framePointerRegister, returnAddressRegister); |
620 | move(stackPointerRegister, framePointerRegister); |
621 | } |
622 | |
623 | void emitFunctionEpilogueWithEmptyFrame() |
624 | { |
625 | popPair(framePointerRegister, returnAddressRegister); |
626 | } |
627 | |
628 | void emitFunctionEpilogue() |
629 | { |
630 | move(framePointerRegister, stackPointerRegister); |
631 | emitFunctionEpilogueWithEmptyFrame(); |
632 | } |
633 | |
634 | ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg) |
635 | { |
636 | move(returnAddressRegister, reg); |
637 | } |
638 | |
639 | ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg) |
640 | { |
641 | move(reg, returnAddressRegister); |
642 | } |
643 | |
644 | ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address) |
645 | { |
646 | loadPtr(address, returnAddressRegister); |
647 | } |
648 | #endif |
649 | |
    void emitGetFromCallFrameHeaderPtr(int entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
651 | { |
652 | loadPtr(Address(from, entry * sizeof(Register)), to); |
653 | } |
    void emitGetFromCallFrameHeader32(int entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
655 | { |
656 | load32(Address(from, entry * sizeof(Register)), to); |
657 | } |
658 | #if USE(JSVALUE64) |
    void emitGetFromCallFrameHeader64(int entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
660 | { |
661 | load64(Address(from, entry * sizeof(Register)), to); |
662 | } |
663 | #endif // USE(JSVALUE64) |
    void emitPutToCallFrameHeader(GPRReg from, int entry)
665 | { |
666 | storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register))); |
667 | } |
668 | |
    void emitPutToCallFrameHeader(void* value, int entry)
670 | { |
671 | storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register))); |
672 | } |
673 | |
    void emitGetCallerFrameFromCallFrameHeaderPtr(RegisterID to)
675 | { |
676 | loadPtr(Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), to); |
677 | } |
    void emitPutCallerFrameToCallFrameHeader(RegisterID from)
679 | { |
680 | storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset())); |
681 | } |
682 | |
    void emitPutReturnPCToCallFrameHeader(RegisterID from)
684 | { |
685 | storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset())); |
686 | } |
    void emitPutReturnPCToCallFrameHeader(TrustedImmPtr from)
688 | { |
689 | storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset())); |
690 | } |
691 | |
692 | // emitPutToCallFrameHeaderBeforePrologue() and related are used to access callee frame header |
693 | // fields before the code from emitFunctionPrologue() has executed. |
694 | // First, the access is via the stack pointer. Second, the address calculation must also take |
695 | // into account that the stack pointer may not have been adjusted down for the return PC and/or |
696 | // caller's frame pointer. On some platforms, the callee is responsible for pushing the |
697 | // "link register" containing the return address in the function prologue. |
698 | #if USE(JSVALUE64) |
    void emitPutToCallFrameHeaderBeforePrologue(GPRReg from, int entry)
700 | { |
701 | storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta())); |
702 | } |
703 | #else |
704 | void emitPutPayloadToCallFrameHeaderBeforePrologue(GPRReg from, int entry) |
705 | { |
706 | storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); |
707 | } |
708 | |
709 | void emitPutTagToCallFrameHeaderBeforePrologue(TrustedImm32 tag, int entry) |
710 | { |
711 | storePtr(tag, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); |
712 | } |
713 | #endif |
714 | |
715 | JumpList branchIfNotEqual(JSValueRegs regs, JSValue value) |
716 | { |
717 | #if USE(JSVALUE64) |
718 | return branch64(NotEqual, regs.gpr(), TrustedImm64(JSValue::encode(value))); |
719 | #else |
720 | JumpList result; |
721 | result.append(branch32(NotEqual, regs.tagGPR(), TrustedImm32(value.tag()))); |
722 | if (value.isEmpty() || value.isUndefinedOrNull()) |
723 | return result; // These don't have anything interesting in the payload. |
724 | result.append(branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload()))); |
725 | return result; |
726 | #endif |
727 | } |
728 | |
729 | Jump branchIfEqual(JSValueRegs regs, JSValue value) |
730 | { |
731 | #if USE(JSVALUE64) |
732 | return branch64(Equal, regs.gpr(), TrustedImm64(JSValue::encode(value))); |
733 | #else |
734 | Jump notEqual; |
735 | // These don't have anything interesting in the payload. |
736 | if (!value.isEmpty() && !value.isUndefinedOrNull()) |
737 | notEqual = branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload())); |
738 | Jump result = branch32(Equal, regs.tagGPR(), TrustedImm32(value.tag())); |
739 | if (notEqual.isSet()) |
740 | notEqual.link(this); |
741 | return result; |
742 | #endif |
743 | } |
744 | |
745 | Jump branchIfNotCell(GPRReg reg, TagRegistersMode mode = HaveTagRegisters) |
746 | { |
747 | #if USE(JSVALUE64) |
748 | if (mode == HaveTagRegisters) |
749 | return branchTest64(NonZero, reg, GPRInfo::notCellMaskRegister); |
750 | return branchTest64(NonZero, reg, TrustedImm64(JSValue::NotCellMask)); |
751 | #else |
752 | UNUSED_PARAM(mode); |
753 | return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag)); |
754 | #endif |
755 | } |
756 | |
757 | Jump branchIfNotCell(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters) |
758 | { |
759 | #if USE(JSVALUE64) |
760 | return branchIfNotCell(regs.gpr(), mode); |
761 | #else |
762 | return branchIfNotCell(regs.tagGPR(), mode); |
763 | #endif |
764 | } |
765 | |
766 | Jump branchIfCell(GPRReg reg, TagRegistersMode mode = HaveTagRegisters) |
767 | { |
768 | #if USE(JSVALUE64) |
769 | if (mode == HaveTagRegisters) |
770 | return branchTest64(Zero, reg, GPRInfo::notCellMaskRegister); |
771 | return branchTest64(Zero, reg, TrustedImm64(JSValue::NotCellMask)); |
772 | #else |
773 | UNUSED_PARAM(mode); |
774 | return branch32(MacroAssembler::Equal, reg, TrustedImm32(JSValue::CellTag)); |
775 | #endif |
776 | } |
777 | Jump branchIfCell(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters) |
778 | { |
779 | #if USE(JSVALUE64) |
780 | return branchIfCell(regs.gpr(), mode); |
781 | #else |
782 | return branchIfCell(regs.tagGPR(), mode); |
783 | #endif |
784 | } |
785 | |
786 | Jump branchIfOther(JSValueRegs regs, GPRReg tempGPR) |
787 | { |
788 | #if USE(JSVALUE64) |
789 | move(regs.gpr(), tempGPR); |
790 | and64(TrustedImm32(~JSValue::UndefinedTag), tempGPR); |
791 | return branch64(Equal, tempGPR, TrustedImm64(JSValue::ValueNull)); |
792 | #else |
793 | or32(TrustedImm32(1), regs.tagGPR(), tempGPR); |
794 | return branch32(Equal, tempGPR, TrustedImm32(JSValue::NullTag)); |
795 | #endif |
796 | } |
797 | |
798 | Jump branchIfNotOther(JSValueRegs regs, GPRReg tempGPR) |
799 | { |
800 | #if USE(JSVALUE64) |
801 | move(regs.gpr(), tempGPR); |
802 | and64(TrustedImm32(~JSValue::UndefinedTag), tempGPR); |
803 | return branch64(NotEqual, tempGPR, TrustedImm64(JSValue::ValueNull)); |
804 | #else |
805 | or32(TrustedImm32(1), regs.tagGPR(), tempGPR); |
806 | return branch32(NotEqual, tempGPR, TrustedImm32(JSValue::NullTag)); |
807 | #endif |
808 | } |
809 | |
810 | Jump branchIfInt32(GPRReg gpr, TagRegistersMode mode = HaveTagRegisters) |
811 | { |
812 | #if USE(JSVALUE64) |
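        // Boxed int32s have all NumberTag bits set, so any value unsigned-greater-or-equal
        // to NumberTag is an int32; boxed (purified) doubles always compare below it.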
813 | if (mode == HaveTagRegisters) |
814 | return branch64(AboveOrEqual, gpr, GPRInfo::numberTagRegister); |
815 | return branch64(AboveOrEqual, gpr, TrustedImm64(JSValue::NumberTag)); |
816 | #else |
817 | UNUSED_PARAM(mode); |
818 | return branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag)); |
819 | #endif |
820 | } |
821 | |
822 | Jump branchIfInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters) |
823 | { |
824 | #if USE(JSVALUE64) |
825 | return branchIfInt32(regs.gpr(), mode); |
826 | #else |
827 | return branchIfInt32(regs.tagGPR(), mode); |
828 | #endif |
829 | } |
830 | |
831 | Jump branchIfNotInt32(GPRReg gpr, TagRegistersMode mode = HaveTagRegisters) |
832 | { |
833 | #if USE(JSVALUE64) |
834 | if (mode == HaveTagRegisters) |
835 | return branch64(Below, gpr, GPRInfo::numberTagRegister); |
836 | return branch64(Below, gpr, TrustedImm64(JSValue::NumberTag)); |
837 | #else |
838 | UNUSED_PARAM(mode); |
839 | return branch32(NotEqual, gpr, TrustedImm32(JSValue::Int32Tag)); |
840 | #endif |
841 | } |
842 | |
843 | Jump branchIfNotInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters) |
844 | { |
845 | #if USE(JSVALUE64) |
846 | return branchIfNotInt32(regs.gpr(), mode); |
847 | #else |
848 | return branchIfNotInt32(regs.tagGPR(), mode); |
849 | #endif |
850 | } |
851 | |
852 | // Note that the tempGPR is not used in 64-bit mode. |
853 | Jump branchIfNumber(JSValueRegs regs, GPRReg tempGPR, TagRegistersMode mode = HaveTagRegisters) |
854 | { |
855 | #if USE(JSVALUE64) |
856 | UNUSED_PARAM(tempGPR); |
857 | return branchIfNumber(regs.gpr(), mode); |
858 | #else |
859 | UNUSED_PARAM(mode); |
860 | ASSERT(tempGPR != InvalidGPRReg); |
861 | add32(TrustedImm32(1), regs.tagGPR(), tempGPR); |
862 | return branch32(Below, tempGPR, TrustedImm32(JSValue::LowestTag + 1)); |
863 | #endif |
864 | } |
865 | |
866 | #if USE(JSVALUE64) |
867 | Jump branchIfNumber(GPRReg gpr, TagRegistersMode mode = HaveTagRegisters) |
868 | { |
869 | if (mode == HaveTagRegisters) |
870 | return branchTest64(NonZero, gpr, GPRInfo::numberTagRegister); |
871 | return branchTest64(NonZero, gpr, TrustedImm64(JSValue::NumberTag)); |
872 | } |
873 | #endif |
874 | |
875 | // Note that the tempGPR is not used in 64-bit mode. |
876 | Jump branchIfNotNumber(JSValueRegs regs, GPRReg tempGPR, TagRegistersMode mode = HaveTagRegisters) |
877 | { |
878 | #if USE(JSVALUE64) |
879 | UNUSED_PARAM(tempGPR); |
880 | return branchIfNotNumber(regs.gpr(), mode); |
881 | #else |
882 | UNUSED_PARAM(mode); |
883 | add32(TrustedImm32(1), regs.tagGPR(), tempGPR); |
884 | return branch32(AboveOrEqual, tempGPR, TrustedImm32(JSValue::LowestTag + 1)); |
885 | #endif |
886 | } |
887 | |
888 | #if USE(JSVALUE64) |
889 | Jump branchIfNotNumber(GPRReg gpr, TagRegistersMode mode = HaveTagRegisters) |
890 | { |
891 | if (mode == HaveTagRegisters) |
892 | return branchTest64(Zero, gpr, GPRInfo::numberTagRegister); |
893 | return branchTest64(Zero, gpr, TrustedImm64(JSValue::NumberTag)); |
894 | } |
895 | #endif |
896 | |
897 | Jump branchIfNotDoubleKnownNotInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters) |
898 | { |
899 | #if USE(JSVALUE64) |
900 | if (mode == HaveTagRegisters) |
901 | return branchTest64(Zero, regs.gpr(), GPRInfo::numberTagRegister); |
902 | return branchTest64(Zero, regs.gpr(), TrustedImm64(JSValue::NumberTag)); |
903 | #else |
904 | UNUSED_PARAM(mode); |
905 | return branch32(AboveOrEqual, regs.tagGPR(), TrustedImm32(JSValue::LowestTag)); |
906 | #endif |
907 | } |
908 | |
909 | // Note that the tempGPR is not used in 32-bit mode. |
910 | Jump branchIfBoolean(GPRReg gpr, GPRReg tempGPR) |
911 | { |
912 | #if USE(JSVALUE64) |
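        // Xor-ing with ValueFalse maps false to 0 and true to 1, so masking with ~1
        // yields zero exactly when the value is a boolean.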
913 | ASSERT(tempGPR != InvalidGPRReg); |
914 | move(gpr, tempGPR); |
915 | xor64(TrustedImm32(JSValue::ValueFalse), tempGPR); |
916 | return branchTest64(Zero, tempGPR, TrustedImm32(static_cast<int32_t>(~1))); |
917 | #else |
918 | UNUSED_PARAM(tempGPR); |
919 | return branch32(Equal, gpr, TrustedImm32(JSValue::BooleanTag)); |
920 | #endif |
921 | } |
922 | |
923 | // Note that the tempGPR is not used in 32-bit mode. |
924 | Jump branchIfBoolean(JSValueRegs regs, GPRReg tempGPR) |
925 | { |
926 | #if USE(JSVALUE64) |
927 | return branchIfBoolean(regs.gpr(), tempGPR); |
928 | #else |
929 | return branchIfBoolean(regs.tagGPR(), tempGPR); |
930 | #endif |
931 | } |
932 | |
933 | // Note that the tempGPR is not used in 32-bit mode. |
934 | Jump branchIfNotBoolean(GPRReg gpr, GPRReg tempGPR) |
935 | { |
936 | #if USE(JSVALUE64) |
937 | ASSERT(tempGPR != InvalidGPRReg); |
938 | move(gpr, tempGPR); |
939 | xor64(TrustedImm32(JSValue::ValueFalse), tempGPR); |
940 | return branchTest64(NonZero, tempGPR, TrustedImm32(static_cast<int32_t>(~1))); |
941 | #else |
942 | UNUSED_PARAM(tempGPR); |
943 | return branch32(NotEqual, gpr, TrustedImm32(JSValue::BooleanTag)); |
944 | #endif |
945 | } |
946 | |
947 | // Note that the tempGPR is not used in 32-bit mode. |
948 | Jump branchIfNotBoolean(JSValueRegs regs, GPRReg tempGPR) |
949 | { |
950 | #if USE(JSVALUE64) |
951 | return branchIfNotBoolean(regs.gpr(), tempGPR); |
952 | #else |
953 | return branchIfNotBoolean(regs.tagGPR(), tempGPR); |
954 | #endif |
955 | } |
956 | |
957 | Jump branchIfObject(GPRReg cellGPR) |
958 | { |
959 | return branch8( |
960 | AboveOrEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType)); |
961 | } |
962 | |
963 | Jump branchIfNotObject(GPRReg cellGPR) |
964 | { |
965 | return branch8( |
966 | Below, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType)); |
967 | } |
968 | |
969 | Jump branchIfType(GPRReg cellGPR, JSType type) |
970 | { |
971 | return branch8(Equal, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type)); |
972 | } |
973 | |
974 | Jump branchIfNotType(GPRReg cellGPR, JSType type) |
975 | { |
976 | return branch8(NotEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type)); |
977 | } |
978 | |
979 | Jump branchIfString(GPRReg cellGPR) { return branchIfType(cellGPR, StringType); } |
980 | Jump branchIfNotString(GPRReg cellGPR) { return branchIfNotType(cellGPR, StringType); } |
981 | Jump branchIfSymbol(GPRReg cellGPR) { return branchIfType(cellGPR, SymbolType); } |
982 | Jump branchIfNotSymbol(GPRReg cellGPR) { return branchIfNotType(cellGPR, SymbolType); } |
983 | Jump branchIfBigInt(GPRReg cellGPR) { return branchIfType(cellGPR, BigIntType); } |
984 | Jump branchIfNotBigInt(GPRReg cellGPR) { return branchIfNotType(cellGPR, BigIntType); } |
985 | Jump branchIfFunction(GPRReg cellGPR) { return branchIfType(cellGPR, JSFunctionType); } |
986 | Jump branchIfNotFunction(GPRReg cellGPR) { return branchIfNotType(cellGPR, JSFunctionType); } |
987 | |
988 | Jump branchIfEmpty(GPRReg gpr) |
989 | { |
990 | #if USE(JSVALUE64) |
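        // The empty value encodes as all-zero bits on 64-bit.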
991 | return branchTest64(Zero, gpr); |
992 | #else |
993 | return branch32(Equal, gpr, TrustedImm32(JSValue::EmptyValueTag)); |
994 | #endif |
995 | } |
996 | |
997 | Jump branchIfEmpty(JSValueRegs regs) |
998 | { |
999 | #if USE(JSVALUE64) |
1000 | return branchIfEmpty(regs.gpr()); |
1001 | #else |
1002 | return branchIfEmpty(regs.tagGPR()); |
1003 | #endif |
1004 | } |
1005 | |
1006 | Jump branchIfNotEmpty(GPRReg gpr) |
1007 | { |
1008 | #if USE(JSVALUE64) |
1009 | return branchTest64(NonZero, gpr); |
1010 | #else |
1011 | return branch32(NotEqual, gpr, TrustedImm32(JSValue::EmptyValueTag)); |
1012 | #endif |
1013 | } |
1014 | |
1015 | Jump branchIfNotEmpty(JSValueRegs regs) |
1016 | { |
1017 | #if USE(JSVALUE64) |
1018 | return branchIfNotEmpty(regs.gpr()); |
1019 | #else |
1020 | return branchIfNotEmpty(regs.tagGPR()); |
1021 | #endif |
1022 | } |
1023 | |
1024 | // Note that this function does not respect MasqueradesAsUndefined. |
1025 | Jump branchIfUndefined(GPRReg gpr) |
1026 | { |
1027 | #if USE(JSVALUE64) |
1028 | return branch64(Equal, gpr, TrustedImm64(JSValue::encode(jsUndefined()))); |
1029 | #else |
1030 | return branch32(Equal, gpr, TrustedImm32(JSValue::UndefinedTag)); |
1031 | #endif |
1032 | } |
1033 | |
1034 | // Note that this function does not respect MasqueradesAsUndefined. |
1035 | Jump branchIfUndefined(JSValueRegs regs) |
1036 | { |
1037 | #if USE(JSVALUE64) |
1038 | return branchIfUndefined(regs.gpr()); |
1039 | #else |
1040 | return branchIfUndefined(regs.tagGPR()); |
1041 | #endif |
1042 | } |
1043 | |
1044 | // Note that this function does not respect MasqueradesAsUndefined. |
1045 | Jump branchIfNotUndefined(GPRReg gpr) |
1046 | { |
1047 | #if USE(JSVALUE64) |
1048 | return branch64(NotEqual, gpr, TrustedImm64(JSValue::encode(jsUndefined()))); |
1049 | #else |
1050 | return branch32(NotEqual, gpr, TrustedImm32(JSValue::UndefinedTag)); |
1051 | #endif |
1052 | } |
1053 | |
1054 | // Note that this function does not respect MasqueradesAsUndefined. |
1055 | Jump branchIfNotUndefined(JSValueRegs regs) |
1056 | { |
1057 | #if USE(JSVALUE64) |
1058 | return branchIfNotUndefined(regs.gpr()); |
1059 | #else |
1060 | return branchIfNotUndefined(regs.tagGPR()); |
1061 | #endif |
1062 | } |
1063 | |
1064 | Jump branchIfNull(GPRReg gpr) |
1065 | { |
1066 | #if USE(JSVALUE64) |
1067 | return branch64(Equal, gpr, TrustedImm64(JSValue::encode(jsNull()))); |
1068 | #else |
1069 | return branch32(Equal, gpr, TrustedImm32(JSValue::NullTag)); |
1070 | #endif |
1071 | } |
1072 | |
1073 | Jump branchIfNull(JSValueRegs regs) |
1074 | { |
1075 | #if USE(JSVALUE64) |
1076 | return branchIfNull(regs.gpr()); |
1077 | #else |
1078 | return branchIfNull(regs.tagGPR()); |
1079 | #endif |
1080 | } |
1081 | |
1082 | Jump branchIfNotNull(GPRReg gpr) |
1083 | { |
1084 | #if USE(JSVALUE64) |
1085 | return branch64(NotEqual, gpr, TrustedImm64(JSValue::encode(jsNull()))); |
1086 | #else |
1087 | return branch32(NotEqual, gpr, TrustedImm32(JSValue::NullTag)); |
1088 | #endif |
1089 | } |
1090 | |
1091 | Jump branchIfNotNull(JSValueRegs regs) |
1092 | { |
1093 | #if USE(JSVALUE64) |
1094 | return branchIfNotNull(regs.gpr()); |
1095 | #else |
1096 | return branchIfNotNull(regs.tagGPR()); |
1097 | #endif |
1098 | } |
1099 | |
1100 | template<typename T> |
1101 | Jump branchStructure(RelationalCondition condition, T leftHandSide, Structure* structure) |
1102 | { |
1103 | #if USE(JSVALUE64) |
1104 | return branch32(condition, leftHandSide, TrustedImm32(structure->id())); |
1105 | #else |
1106 | return branchPtr(condition, leftHandSide, TrustedImmPtr(structure)); |
1107 | #endif |
1108 | } |
1109 | |
1110 | Jump branchIfFastTypedArray(GPRReg baseGPR); |
1111 | Jump branchIfNotFastTypedArray(GPRReg baseGPR); |
1112 | |
1113 | Jump branchIfNaN(FPRReg fpr) |
1114 | { |
1115 | return branchDouble(DoubleNotEqualOrUnordered, fpr, fpr); |
1116 | } |
1117 | |
1118 | Jump branchIfNotNaN(FPRReg fpr) |
1119 | { |
1120 | return branchDouble(DoubleEqual, fpr, fpr); |
1121 | } |
1122 | |
1123 | Jump branchIfRopeStringImpl(GPRReg stringImplGPR) |
1124 | { |
1125 | return branchTestPtr(NonZero, stringImplGPR, TrustedImm32(JSString::isRopeInPointer)); |
1126 | } |
1127 | |
1128 | Jump branchIfNotRopeStringImpl(GPRReg stringImplGPR) |
1129 | { |
1130 | return branchTestPtr(Zero, stringImplGPR, TrustedImm32(JSString::isRopeInPointer)); |
1131 | } |
1132 | |
1133 | static Address addressForByteOffset(ptrdiff_t byteOffset) |
1134 | { |
1135 | return Address(GPRInfo::callFrameRegister, byteOffset); |
1136 | } |
1137 | static Address addressFor(VirtualRegister virtualRegister, GPRReg baseReg) |
1138 | { |
1139 | ASSERT(virtualRegister.isValid()); |
1140 | return Address(baseReg, virtualRegister.offset() * sizeof(Register)); |
1141 | } |
1142 | static Address addressFor(VirtualRegister virtualRegister) |
1143 | { |
1144 | // NB. It's tempting on some architectures to sometimes use an offset from the stack |
1145 | // register because for some offsets that will encode to a smaller instruction. But we |
1146 | // cannot do this. We use this in places where the stack pointer has been moved to some |
1147 | // unpredictable location. |
1148 | ASSERT(virtualRegister.isValid()); |
1149 | return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register)); |
1150 | } |
1151 | static Address addressFor(int operand) |
1152 | { |
1153 | return addressFor(static_cast<VirtualRegister>(operand)); |
1154 | } |
1155 | |
1156 | static Address tagFor(VirtualRegister virtualRegister, GPRReg baseGPR) |
1157 | { |
1158 | ASSERT(virtualRegister.isValid()); |
1159 | return Address(baseGPR, virtualRegister.offset() * sizeof(Register) + TagOffset); |
1160 | } |
1161 | static Address tagFor(VirtualRegister virtualRegister) |
1162 | { |
1163 | ASSERT(virtualRegister.isValid()); |
1164 | return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + TagOffset); |
1165 | } |
1166 | static Address tagFor(int operand) |
1167 | { |
1168 | return tagFor(static_cast<VirtualRegister>(operand)); |
1169 | } |
1170 | |
1171 | static Address payloadFor(VirtualRegister virtualRegister, GPRReg baseGPR) |
1172 | { |
1173 | ASSERT(virtualRegister.isValid()); |
1174 | return Address(baseGPR, virtualRegister.offset() * sizeof(Register) + PayloadOffset); |
1175 | } |
1176 | static Address payloadFor(VirtualRegister virtualRegister) |
1177 | { |
1178 | ASSERT(virtualRegister.isValid()); |
1179 | return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + PayloadOffset); |
1180 | } |
1181 | static Address payloadFor(int operand) |
1182 | { |
1183 | return payloadFor(static_cast<VirtualRegister>(operand)); |
1184 | } |
1185 | |
1186 | // Access to our fixed callee CallFrame. |
1187 | static Address calleeFrameSlot(int slot) |
1188 | { |
1189 | ASSERT(slot >= CallerFrameAndPC::sizeInRegisters); |
1190 | return Address(stackPointerRegister, sizeof(Register) * (slot - CallerFrameAndPC::sizeInRegisters)); |
1191 | } |
1192 | |
1193 | // Access to our fixed callee CallFrame. |
1194 | static Address calleeArgumentSlot(int argument) |
1195 | { |
1196 | return calleeFrameSlot(virtualRegisterForArgument(argument).offset()); |
1197 | } |
1198 | |
1199 | static Address calleeFrameTagSlot(int slot) |
1200 | { |
1201 | return calleeFrameSlot(slot).withOffset(TagOffset); |
1202 | } |
1203 | |
1204 | static Address calleeFramePayloadSlot(int slot) |
1205 | { |
1206 | return calleeFrameSlot(slot).withOffset(PayloadOffset); |
1207 | } |
1208 | |
1209 | static Address calleeArgumentTagSlot(int argument) |
1210 | { |
1211 | return calleeArgumentSlot(argument).withOffset(TagOffset); |
1212 | } |
1213 | |
1214 | static Address calleeArgumentPayloadSlot(int argument) |
1215 | { |
1216 | return calleeArgumentSlot(argument).withOffset(PayloadOffset); |
1217 | } |
1218 | |
1219 | static Address calleeFrameCallerFrame() |
1220 | { |
1221 | return calleeFrameSlot(0).withOffset(CallFrame::callerFrameOffset()); |
1222 | } |
1223 | |
1224 | static GPRReg selectScratchGPR(RegisterSet preserved) |
1225 | { |
1226 | GPRReg registers[] = { |
1227 | GPRInfo::regT0, |
1228 | GPRInfo::regT1, |
1229 | GPRInfo::regT2, |
1230 | GPRInfo::regT3, |
1231 | GPRInfo::regT4, |
1232 | GPRInfo::regT5, |
1233 | }; |
1234 | |
1235 | for (GPRReg reg : registers) { |
1236 | if (!preserved.contains(reg)) |
1237 | return reg; |
1238 | } |
1239 | RELEASE_ASSERT_NOT_REACHED(); |
1240 | return InvalidGPRReg; |
1241 | } |
1242 | |
1243 | template<typename... Regs> |
1244 | static GPRReg selectScratchGPR(Regs... args) |
1245 | { |
1246 | RegisterSet set; |
1247 | constructRegisterSet(set, args...); |
1248 | return selectScratchGPR(set); |
1249 | } |
1250 | |
1251 | static void constructRegisterSet(RegisterSet&) |
1252 | { |
1253 | } |
1254 | |
1255 | template<typename... Regs> |
1256 | static void constructRegisterSet(RegisterSet& set, JSValueRegs regs, Regs... args) |
1257 | { |
1258 | if (regs.tagGPR() != InvalidGPRReg) |
1259 | set.set(regs.tagGPR()); |
1260 | if (regs.payloadGPR() != InvalidGPRReg) |
1261 | set.set(regs.payloadGPR()); |
1262 | constructRegisterSet(set, args...); |
1263 | } |
1264 | |
1265 | template<typename... Regs> |
1266 | static void constructRegisterSet(RegisterSet& set, GPRReg reg, Regs... args) |
1267 | { |
1268 | if (reg != InvalidGPRReg) |
1269 | set.set(reg); |
1270 | constructRegisterSet(set, args...); |
1271 | } |
1272 | |
1273 | // Add a debug call. This call has no effect on JIT code execution state. |
1274 | void debugCall(VM&, V_DebugOperation_EPP function, void* argument); |
1275 | |
1276 | // These methods JIT generate dynamic, debug-only checks - akin to ASSERTs. |
1277 | #if !ASSERT_DISABLED |
1278 | void jitAssertIsInt32(GPRReg); |
1279 | void jitAssertIsJSInt32(GPRReg); |
1280 | void jitAssertIsJSNumber(GPRReg); |
1281 | void jitAssertIsJSDouble(GPRReg); |
1282 | void jitAssertIsCell(GPRReg); |
1283 | void jitAssertHasValidCallFrame(); |
1284 | void jitAssertIsNull(GPRReg); |
1285 | void jitAssertTagsInPlace(); |
1286 | void jitAssertArgumentCountSane(); |
1287 | #else |
1288 | void jitAssertIsInt32(GPRReg) { } |
1289 | void jitAssertIsJSInt32(GPRReg) { } |
1290 | void jitAssertIsJSNumber(GPRReg) { } |
1291 | void jitAssertIsJSDouble(GPRReg) { } |
1292 | void jitAssertIsCell(GPRReg) { } |
1293 | void jitAssertHasValidCallFrame() { } |
1294 | void jitAssertIsNull(GPRReg) { } |
1295 | void jitAssertTagsInPlace() { } |
1296 | void jitAssertArgumentCountSane() { } |
1297 | #endif |
1298 | |
1299 | void jitReleaseAssertNoException(VM&); |
1300 | |
1301 | void incrementSuperSamplerCount(); |
1302 | void decrementSuperSamplerCount(); |
1303 | |
1304 | void purifyNaN(FPRReg); |
1305 | |
1306 | // These methods convert between doubles, and doubles boxed and JSValues. |
1307 | #if USE(JSVALUE64) |
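    // Doubles are boxed by subtracting NumberTag, which modulo 2^64 is the same as adding
    // the 2^49 double-encode offset; unboxing adds NumberTag back.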
1308 | GPRReg boxDouble(FPRReg fpr, GPRReg gpr, TagRegistersMode mode = HaveTagRegisters) |
1309 | { |
1310 | moveDoubleTo64(fpr, gpr); |
1311 | if (mode == DoNotHaveTagRegisters) |
1312 | sub64(TrustedImm64(JSValue::NumberTag), gpr); |
1313 | else { |
1314 | sub64(GPRInfo::numberTagRegister, gpr); |
1315 | jitAssertIsJSDouble(gpr); |
1316 | } |
1317 | return gpr; |
1318 | } |
1319 | FPRReg unboxDoubleWithoutAssertions(GPRReg gpr, GPRReg resultGPR, FPRReg fpr) |
1320 | { |
1321 | add64(GPRInfo::numberTagRegister, gpr, resultGPR); |
1322 | move64ToDouble(resultGPR, fpr); |
1323 | return fpr; |
1324 | } |
1325 | FPRReg unboxDouble(GPRReg gpr, GPRReg resultGPR, FPRReg fpr) |
1326 | { |
1327 | jitAssertIsJSDouble(gpr); |
1328 | return unboxDoubleWithoutAssertions(gpr, resultGPR, fpr); |
1329 | } |
1330 | |
1331 | void boxDouble(FPRReg fpr, JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters) |
1332 | { |
1333 | boxDouble(fpr, regs.gpr(), mode); |
1334 | } |
1335 | |
1336 | void unboxDoubleNonDestructive(JSValueRegs regs, FPRReg destFPR, GPRReg resultGPR, FPRReg) |
1337 | { |
1338 | unboxDouble(regs.payloadGPR(), resultGPR, destFPR); |
1339 | } |
1340 | |
1341 | // Here are possible arrangements of source, target, scratch: |
1342 | // - source, target, scratch can all be separate registers. |
1343 | // - source and target can be the same but scratch is separate. |
1344 | // - target and scratch can be the same but source is separate. |
1345 | void boxInt52(GPRReg source, GPRReg target, GPRReg scratch, FPRReg fpScratch) |
1346 | { |
1347 | // Is it an int32? |
1348 | signExtend32ToPtr(source, scratch); |
1349 | Jump isInt32 = branch64(Equal, source, scratch); |
1350 | |
        // Nope, it's not, but the source GPR contains the int64 value.
1352 | convertInt64ToDouble(source, fpScratch); |
1353 | boxDouble(fpScratch, target); |
1354 | Jump done = jump(); |
1355 | |
1356 | isInt32.link(this); |
1357 | zeroExtend32ToPtr(source, target); |
1358 | or64(GPRInfo::numberTagRegister, target); |
1359 | |
1360 | done.link(this); |
1361 | } |
1362 | #endif |
1363 | |
1364 | #if USE(JSVALUE32_64) |
1365 | void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR) |
1366 | { |
1367 | moveDoubleToInts(fpr, payloadGPR, tagGPR); |
1368 | } |
1369 | void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR) |
1370 | { |
1371 | moveIntsToDouble(payloadGPR, tagGPR, fpr, scratchFPR); |
1372 | } |
1373 | |
1374 | void boxDouble(FPRReg fpr, JSValueRegs regs) |
1375 | { |
1376 | boxDouble(fpr, regs.tagGPR(), regs.payloadGPR()); |
1377 | } |
1378 | void unboxDouble(JSValueRegs regs, FPRReg fpr, FPRReg scratchFPR) |
1379 | { |
1380 | unboxDouble(regs.tagGPR(), regs.payloadGPR(), fpr, scratchFPR); |
1381 | } |
1382 | |
1383 | void unboxDoubleNonDestructive(const JSValueRegs regs, FPRReg destFPR, GPRReg, FPRReg scratchFPR) |
1384 | { |
1385 | unboxDouble(regs, destFPR, scratchFPR); |
1386 | } |
1387 | #endif |
1388 | |
1389 | void boxBooleanPayload(GPRReg boolGPR, GPRReg payloadGPR) |
1390 | { |
1391 | #if USE(JSVALUE64) |
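        // Booleans are encoded as ValueFalse + 0 or ValueFalse + 1 (i.e. ValueTrue).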
1392 | add32(TrustedImm32(JSValue::ValueFalse), boolGPR, payloadGPR); |
1393 | #else |
1394 | move(boolGPR, payloadGPR); |
1395 | #endif |
1396 | } |
1397 | |
1398 | void boxBooleanPayload(bool value, GPRReg payloadGPR) |
1399 | { |
1400 | #if USE(JSVALUE64) |
1401 | move(TrustedImm32(JSValue::ValueFalse + value), payloadGPR); |
1402 | #else |
1403 | move(TrustedImm32(value), payloadGPR); |
1404 | #endif |
1405 | } |
1406 | |
1407 | void boxBoolean(GPRReg boolGPR, JSValueRegs boxedRegs) |
1408 | { |
1409 | boxBooleanPayload(boolGPR, boxedRegs.payloadGPR()); |
1410 | #if USE(JSVALUE32_64) |
1411 | move(TrustedImm32(JSValue::BooleanTag), boxedRegs.tagGPR()); |
1412 | #endif |
1413 | } |
1414 | |
1415 | void boxBoolean(bool value, JSValueRegs boxedRegs) |
1416 | { |
1417 | boxBooleanPayload(value, boxedRegs.payloadGPR()); |
1418 | #if USE(JSVALUE32_64) |
1419 | move(TrustedImm32(JSValue::BooleanTag), boxedRegs.tagGPR()); |
1420 | #endif |
1421 | } |
1422 | |
1423 | void boxInt32(GPRReg intGPR, JSValueRegs boxedRegs, TagRegistersMode mode = HaveTagRegisters) |
1424 | { |
1425 | #if USE(JSVALUE64) |
1426 | if (mode == DoNotHaveTagRegisters) { |
1427 | move(intGPR, boxedRegs.gpr()); |
1428 | or64(TrustedImm64(JSValue::NumberTag), boxedRegs.gpr()); |
1429 | } else |
1430 | or64(GPRInfo::numberTagRegister, intGPR, boxedRegs.gpr()); |
1431 | #else |
1432 | UNUSED_PARAM(mode); |
1433 | move(intGPR, boxedRegs.payloadGPR()); |
1434 | move(TrustedImm32(JSValue::Int32Tag), boxedRegs.tagGPR()); |
1435 | #endif |
1436 | } |
1437 | |
1438 | void boxCell(GPRReg cellGPR, JSValueRegs boxedRegs) |
1439 | { |
1440 | #if USE(JSVALUE64) |
1441 | move(cellGPR, boxedRegs.gpr()); |
1442 | #else |
1443 | move(cellGPR, boxedRegs.payloadGPR()); |
1444 | move(TrustedImm32(JSValue::CellTag), boxedRegs.tagGPR()); |
1445 | #endif |
1446 | } |
1447 | |
1448 | void callExceptionFuzz(VM&); |
1449 | |
1450 | enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck }; |
1451 | enum ExceptionJumpWidth { NormalJumpWidth, FarJumpWidth }; |
1452 | JS_EXPORT_PRIVATE Jump emitExceptionCheck( |
1453 | VM&, ExceptionCheckKind = NormalExceptionCheck, ExceptionJumpWidth = NormalJumpWidth); |
1454 | JS_EXPORT_PRIVATE Jump emitNonPatchableExceptionCheck(VM&); |
1455 | Jump emitJumpIfException(VM&); |
1456 | |
1457 | #if ENABLE(SAMPLING_COUNTERS) |
1458 | static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1) |
1459 | { |
1460 | jit.add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter())); |
1461 | } |
1462 | void emitCount(AbstractSamplingCounter& counter, int32_t increment = 1) |
1463 | { |
1464 | add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter())); |
1465 | } |
1466 | #endif |
1467 | |
1468 | #if ENABLE(SAMPLING_FLAGS) |
1469 | void setSamplingFlag(int32_t); |
1470 | void clearSamplingFlag(int32_t flag); |
1471 | #endif |
1472 | |
1473 | JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin) |
1474 | { |
1475 | return codeBlock()->globalObjectFor(codeOrigin); |
1476 | } |
1477 | |
1478 | bool isStrictModeFor(CodeOrigin codeOrigin) |
1479 | { |
1480 | auto* inlineCallFrame = codeOrigin.inlineCallFrame(); |
1481 | if (!inlineCallFrame) |
1482 | return codeBlock()->isStrictMode(); |
1483 | return inlineCallFrame->isStrictMode(); |
1484 | } |
1485 | |
1486 | ECMAMode ecmaModeFor(CodeOrigin codeOrigin) |
1487 | { |
1488 | return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode; |
1489 | } |
1490 | |
1491 | ExecutableBase* executableFor(const CodeOrigin& codeOrigin); |
1492 | |
1493 | CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin) |
1494 | { |
1495 | return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock()); |
1496 | } |
1497 | |
1498 | CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame) |
1499 | { |
1500 | if (!inlineCallFrame) |
1501 | return baselineCodeBlock(); |
1502 | return baselineCodeBlockForInlineCallFrame(inlineCallFrame); |
1503 | } |
1504 | |
1505 | CodeBlock* baselineCodeBlock() |
1506 | { |
1507 | return m_baselineCodeBlock; |
1508 | } |
1509 | |
1510 | static VirtualRegister argumentsStart(InlineCallFrame* inlineCallFrame) |
1511 | { |
1512 | if (!inlineCallFrame) |
1513 | return VirtualRegister(CallFrame::argumentOffset(0)); |
1514 | if (inlineCallFrame->argumentsWithFixup.size() <= 1) |
1515 | return virtualRegisterForLocal(0); |
1516 | ValueRecovery recovery = inlineCallFrame->argumentsWithFixup[1]; |
1517 | RELEASE_ASSERT(recovery.technique() == DisplacedInJSStack); |
1518 | return recovery.virtualRegister(); |
1519 | } |
1520 | |
1521 | static VirtualRegister argumentsStart(const CodeOrigin& codeOrigin) |
1522 | { |
1523 | return argumentsStart(codeOrigin.inlineCallFrame()); |
1524 | } |
1525 | |
1526 | static VirtualRegister argumentCount(InlineCallFrame* inlineCallFrame) |
1527 | { |
1528 | ASSERT(!inlineCallFrame || inlineCallFrame->isVarargs()); |
1529 | if (!inlineCallFrame) |
1530 | return VirtualRegister(CallFrameSlot::argumentCount); |
1531 | return inlineCallFrame->argumentCountRegister; |
1532 | } |
1533 | |
1534 | static VirtualRegister argumentCount(const CodeOrigin& codeOrigin) |
1535 | { |
1536 | return argumentCount(codeOrigin.inlineCallFrame()); |
1537 | } |
1538 | |
1539 | void emitLoadStructure(VM&, RegisterID source, RegisterID dest, RegisterID scratch); |
1540 | |
1541 | void emitStoreStructureWithTypeInfo(TrustedImmPtr structure, RegisterID dest, RegisterID) |
1542 | { |
1543 | emitStoreStructureWithTypeInfo(*this, structure, dest); |
1544 | } |
1545 | |
1546 | void emitStoreStructureWithTypeInfo(RegisterID structure, RegisterID dest, RegisterID scratch) |
1547 | { |
1548 | #if USE(JSVALUE64) |
1549 | load64(MacroAssembler::Address(structure, Structure::structureIDOffset()), scratch); |
1550 | store64(scratch, MacroAssembler::Address(dest, JSCell::structureIDOffset())); |
1551 | #else |
1552 | // Store all the info flags using a single 32-bit wide load and store. |
1553 | load32(MacroAssembler::Address(structure, Structure::indexingModeIncludingHistoryOffset()), scratch); |
1554 | store32(scratch, MacroAssembler::Address(dest, JSCell::indexingTypeAndMiscOffset())); |
1555 | |
1556 | // Store the StructureID |
1557 | storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset())); |
1558 | #endif |
1559 | } |
1560 | |
1561 | static void emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest); |
1562 | |
1563 | Jump barrierBranchWithoutFence(GPRReg cell) |
1564 | { |
1565 | return branch8(Above, Address(cell, JSCell::cellStateOffset()), TrustedImm32(blackThreshold)); |
1566 | } |
1567 | |
1568 | Jump barrierBranchWithoutFence(JSCell* cell) |
1569 | { |
1570 | uint8_t* address = reinterpret_cast<uint8_t*>(cell) + JSCell::cellStateOffset(); |
1571 | return branch8(Above, AbsoluteAddress(address), TrustedImm32(blackThreshold)); |
1572 | } |
1573 | |
1574 | Jump barrierBranch(VM& vm, GPRReg cell, GPRReg scratchGPR) |
1575 | { |
1576 | load8(Address(cell, JSCell::cellStateOffset()), scratchGPR); |
1577 | return branch32(Above, scratchGPR, AbsoluteAddress(vm.heap.addressOfBarrierThreshold())); |
1578 | } |
1579 | |
1580 | Jump barrierBranch(VM& vm, JSCell* cell, GPRReg scratchGPR) |
1581 | { |
1582 | uint8_t* address = reinterpret_cast<uint8_t*>(cell) + JSCell::cellStateOffset(); |
1583 | load8(address, scratchGPR); |
1584 | return branch32(Above, scratchGPR, AbsoluteAddress(vm.heap.addressOfBarrierThreshold())); |
1585 | } |
1586 | |
1587 | void barrierStoreLoadFence(VM& vm) |
1588 | { |
1589 | Jump ok = jumpIfMutatorFenceNotNeeded(vm); |
1590 | memoryFence(); |
1591 | ok.link(this); |
1592 | } |
1593 | |
1594 | void mutatorFence(VM& vm) |
1595 | { |
1596 | if (isX86()) |
1597 | return; |
1598 | Jump ok = jumpIfMutatorFenceNotNeeded(vm); |
1599 | storeFence(); |
1600 | ok.link(this); |
1601 | } |
1602 | |
1603 | void cageWithoutUntagging(Gigacage::Kind kind, GPRReg storage) |
1604 | { |
1605 | #if GIGACAGE_ENABLED |
1606 | if (!Gigacage::isEnabled(kind)) |
1607 | return; |
1608 | |
1609 | #if CPU(ARM64E) |
1610 | RegisterID tempReg = InvalidGPRReg; |
1611 | if (kind == Gigacage::Primitive) { |
1612 | tempReg = getCachedMemoryTempRegisterIDAndInvalidate(); |
1613 | move(storage, tempReg); |
1614 | // Flip the registers since bitFieldInsert only inserts into the low bits. |
1615 | std::swap(storage, tempReg); |
1616 | } |
1617 | #endif |
1618 | andPtr(TrustedImmPtr(Gigacage::mask(kind)), storage); |
1619 | addPtr(TrustedImmPtr(Gigacage::basePtr(kind)), storage); |
1620 | #if CPU(ARM64E) |
1621 | if (kind == Gigacage::Primitive) |
1622 | bitFieldInsert64(storage, 0, 64 - numberOfPACBits, tempReg); |
1623 | #endif |
1624 | |
1625 | #else |
1626 | UNUSED_PARAM(kind); |
1627 | UNUSED_PARAM(storage); |
1628 | #endif |
1629 | } |
1630 | |
1631 | // length may be the same register as scratch. |
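    // For the primitive cage, which can be disabled at runtime, the base pointer is reloaded and
    // tested: a null base means caging is currently off, so the mask-and-rebase below is skipped.
    // On ARM64E the pointer is still untagged against the length either way.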
1632 | void cageConditionally(Gigacage::Kind kind, GPRReg storage, GPRReg length, GPRReg scratch) |
1633 | { |
1634 | #if GIGACAGE_ENABLED |
1635 | if (Gigacage::isEnabled(kind)) { |
1636 | if (kind != Gigacage::Primitive || Gigacage::isDisablingPrimitiveGigacageForbidden()) |
1637 | cageWithoutUntagging(kind, storage); |
1638 | else { |
1639 | #if CPU(ARM64E) |
1640 | if (length == scratch) |
1641 | scratch = getCachedMemoryTempRegisterIDAndInvalidate(); |
1642 | #endif |
1643 | loadPtr(Gigacage::addressOfBasePtr(kind), scratch); |
1644 | Jump done = branchTest64(Zero, scratch); |
1645 | #if CPU(ARM64E) |
1646 | GPRReg tempReg = getCachedDataTempRegisterIDAndInvalidate(); |
1647 | move(storage, tempReg); |
1648 | ASSERT(LogicalImmediate::create64(Gigacage::mask(kind)).isValid()); |
1649 | andPtr(TrustedImmPtr(Gigacage::mask(kind)), tempReg); |
1650 | addPtr(scratch, tempReg); |
1651 | bitFieldInsert64(tempReg, 0, 64 - numberOfPACBits, storage); |
1652 | #else |
1653 | andPtr(TrustedImmPtr(Gigacage::mask(kind)), storage); |
1654 | addPtr(scratch, storage); |
1655 | #endif // CPU(ARM64E) |
1656 | done.link(this); |
1657 | } |
1658 | } |
1659 | #endif |
1660 | |
1661 | #if CPU(ARM64E) |
1662 | if (kind == Gigacage::Primitive) |
1663 | untagArrayPtr(length, storage); |
1664 | #endif |
1665 | UNUSED_PARAM(kind); |
1666 | UNUSED_PARAM(storage); |
1667 | UNUSED_PARAM(length); |
1668 | UNUSED_PARAM(scratch); |
1669 | } |
1670 | |
1671 | void emitComputeButterflyIndexingMask(GPRReg vectorLengthGPR, GPRReg scratchGPR, GPRReg resultGPR) |
1672 | { |
1673 | ASSERT(scratchGPR != resultGPR); |
        Jump done;
        // If vectorLength == 0 then clz will return 32 on both ARM and x86. On 64-bit systems, we can
        // then do a 64-bit right shift on a 32-bit -1 to get a 0 mask for zero vectorLength. On 32-bit
        // ARM, the shift amount is masked with 0xff, which means it will still create a 0 mask.
#if CPU(X86)
        // 32-bit x86 masks the shift amount with 0x1f, so shifting by 32 would leave the -1 mask
        // untouched; handle a zero vectorLength explicitly instead.
        Jump nonZero = branchTest32(NonZero, vectorLengthGPR);
        move(TrustedImm32(0), resultGPR);
        done = jump();
        nonZero.link(this);
#endif
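        // Worked example: vectorLength == 100 has 7 significant bits, so clz is 25 and the mask
        // becomes 0xffffffff >> 25 == 127; vectorLength == 0 gives clz == 32 and, per the above, a
        // mask of 0.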
1676 | countLeadingZeros32(vectorLengthGPR, scratchGPR); |
1677 | move(TrustedImm32(-1), resultGPR); |
1678 | urshiftPtr(scratchGPR, resultGPR); |
1679 | if (done.isSet()) |
1680 | done.link(this); |
1681 | } |
1682 | |
    // If, for whatever reason, the butterfly is going to change the vector length, this function does
    // NOT update the indexing mask.
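    // The structure ID is nuked and the butterfly store is bracketed by fences so that a concurrent
    // collector never observes the new butterfly paired with a fully valid old structure; on x86 the
    // stronger memory model lets the fences be dropped and the nuke alone suffices.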
1685 | void nukeStructureAndStoreButterfly(VM& vm, GPRReg butterfly, GPRReg object) |
1686 | { |
1687 | if (isX86()) { |
1688 | or32(TrustedImm32(bitwise_cast<int32_t>(nukedStructureIDBit())), Address(object, JSCell::structureIDOffset())); |
1689 | storePtr(butterfly, Address(object, JSObject::butterflyOffset())); |
1690 | return; |
1691 | } |
1692 | |
1693 | Jump ok = jumpIfMutatorFenceNotNeeded(vm); |
1694 | or32(TrustedImm32(bitwise_cast<int32_t>(nukedStructureIDBit())), Address(object, JSCell::structureIDOffset())); |
1695 | storeFence(); |
1696 | storePtr(butterfly, Address(object, JSObject::butterflyOffset())); |
1697 | storeFence(); |
1698 | Jump done = jump(); |
1699 | ok.link(this); |
1700 | storePtr(butterfly, Address(object, JSObject::butterflyOffset())); |
1701 | done.link(this); |
1702 | } |
1703 | |
1704 | Jump jumpIfMutatorFenceNotNeeded(VM& vm) |
1705 | { |
1706 | return branchTest8(Zero, AbsoluteAddress(vm.heap.addressOfMutatorShouldBeFenced())); |
1707 | } |
1708 | |
1709 | void sanitizeStackInline(VM&, GPRReg scratch); |
1710 | |
1711 | // Emits the branch structure for typeof. The code emitted by this doesn't fall through. The |
1712 | // functor is called at those points where we have pinpointed a type. One way to use this is to |
1713 | // have the functor emit the code to put the type string into an appropriate register and then |
1714 | // jump out. A secondary functor is used for the call trap and masquerades-as-undefined slow |
1715 | // case. It is passed the unlinked jump to the slow case. |
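    // A minimal usage sketch (the names jit, vm, valueRegs, tempGPR, and resultGPR, and the use of
    // vm.smallStrings.typeString(), are illustrative assumptions rather than a transcript of a real
    // caller):
    //
    //     JumpList doneJumps;
    //     JumpList slowCases;
    //     jit.emitTypeOf(valueRegs, tempGPR,
    //         [&] (TypeofType type, bool fallsThrough) {
    //             jit.move(TrustedImmPtr(vm.smallStrings.typeString(type)), resultGPR);
    //             if (!fallsThrough)
    //                 doneJumps.append(jit.jump());
    //         },
    //         [&] (Jump slowCase) {
    //             slowCases.append(slowCase);
    //         });
    //     doneJumps.link(&jit);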
1716 | template<typename Functor, typename SlowPathFunctor> |
1717 | void emitTypeOf( |
1718 | JSValueRegs regs, GPRReg tempGPR, const Functor& functor, |
1719 | const SlowPathFunctor& slowPathFunctor) |
1720 | { |
1721 | // Implements the following branching structure: |
1722 | // |
1723 | // if (is cell) { |
1724 | // if (is object) { |
1725 | // if (is function) { |
1726 | // return function; |
1727 | // } else if (doesn't have call trap and doesn't masquerade as undefined) { |
1728 | // return object |
1729 | // } else { |
1730 | // return slowPath(); |
1731 | // } |
1732 | // } else if (is string) { |
1733 | // return string |
1734 | // } else if (is bigint) { |
1735 | // return bigint |
1736 | // } else { |
1737 | // return symbol |
1738 | // } |
1739 | // } else if (is number) { |
1740 | // return number |
1741 | // } else if (is null) { |
1742 | // return object |
1743 | // } else if (is boolean) { |
1744 | // return boolean |
1745 | // } else { |
1746 | // return undefined |
1747 | // } |
1748 | // |
1749 | // FIXME: typeof Symbol should be more frequently seen than BigInt. |
1750 | // We should change the order of type detection based on this frequency. |
1751 | // https://bugs.webkit.org/show_bug.cgi?id=192650 |
1752 | |
1753 | Jump notCell = branchIfNotCell(regs); |
1754 | |
1755 | GPRReg cellGPR = regs.payloadGPR(); |
1756 | Jump notObject = branchIfNotObject(cellGPR); |
1757 | |
1758 | Jump notFunction = branchIfNotFunction(cellGPR); |
1759 | functor(TypeofType::Function, false); |
1760 | |
1761 | notFunction.link(this); |
1762 | slowPathFunctor( |
1763 | branchTest8( |
1764 | NonZero, |
1765 | Address(cellGPR, JSCell::typeInfoFlagsOffset()), |
1766 | TrustedImm32(MasqueradesAsUndefined | OverridesGetCallData))); |
1767 | functor(TypeofType::Object, false); |
1768 | |
1769 | notObject.link(this); |
1770 | |
1771 | Jump notString = branchIfNotString(cellGPR); |
1772 | functor(TypeofType::String, false); |
1773 | |
1774 | notString.link(this); |
1775 | |
1776 | Jump notBigInt = branchIfNotBigInt(cellGPR); |
1777 | functor(TypeofType::BigInt, false); |
1778 | |
1779 | notBigInt.link(this); |
1780 | functor(TypeofType::Symbol, false); |
1781 | |
1782 | notCell.link(this); |
1783 | |
1784 | Jump notNumber = branchIfNotNumber(regs, tempGPR); |
1785 | functor(TypeofType::Number, false); |
1786 | notNumber.link(this); |
1787 | |
1788 | JumpList notNull = branchIfNotEqual(regs, jsNull()); |
1789 | functor(TypeofType::Object, false); |
1790 | notNull.link(this); |
1791 | |
1792 | Jump notBoolean = branchIfNotBoolean(regs, tempGPR); |
1793 | functor(TypeofType::Boolean, false); |
1794 | notBoolean.link(this); |
1795 | |
1796 | functor(TypeofType::Undefined, true); |
1797 | } |
1798 | |
1799 | void emitDumbVirtualCall(VM&, JSGlobalObject*, CallLinkInfo*); |
1800 | |
1801 | void makeSpaceOnStackForCCall(); |
1802 | void reclaimSpaceOnStackForCCall(); |
1803 | |
1804 | #if USE(JSVALUE64) |
1805 | void emitRandomThunk(JSGlobalObject*, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result); |
1806 | void emitRandomThunk(VM&, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result); |
1807 | #endif |
1808 | |
1809 | // Call this if you know that the value held in allocatorGPR is non-null. This DOES NOT mean |
1810 | // that allocator is non-null; allocator can be null as a signal that we don't know what the |
1811 | // value of allocatorGPR is. Additionally, if the allocator is not null, then there is no need |
1812 | // to populate allocatorGPR - this code will ignore the contents of allocatorGPR. |
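    // Concretely (illustrative): either pass JITAllocator::constant(allocator) and leave allocatorGPR
    // unpopulated, or materialize the runtime allocator value into allocatorGPR and pass a
    // non-constant JITAllocator.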
1813 | void emitAllocateWithNonNullAllocator(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath); |
1814 | |
1815 | void emitAllocate(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath); |
1816 | |
1817 | template<typename StructureType> |
1818 | void emitAllocateJSCell(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, StructureType structure, GPRReg scratchGPR, JumpList& slowPath) |
1819 | { |
1820 | emitAllocate(resultGPR, allocator, allocatorGPR, scratchGPR, slowPath); |
1821 | emitStoreStructureWithTypeInfo(structure, resultGPR, scratchGPR); |
1822 | } |
1823 | |
1824 | template<typename StructureType, typename StorageType> |
1825 | void emitAllocateJSObject(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, StructureType structure, StorageType storage, GPRReg scratchGPR, JumpList& slowPath) |
1826 | { |
1827 | emitAllocateJSCell(resultGPR, allocator, allocatorGPR, structure, scratchGPR, slowPath); |
1828 | storePtr(storage, Address(resultGPR, JSObject::butterflyOffset())); |
1829 | } |
1830 | |
1831 | template<typename ClassType, typename StructureType, typename StorageType> |
1832 | void emitAllocateJSObjectWithKnownSize( |
1833 | VM& vm, GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1, |
1834 | GPRReg scratchGPR2, JumpList& slowPath, size_t size) |
1835 | { |
1836 | Allocator allocator = allocatorForNonVirtualConcurrently<ClassType>(vm, size, AllocatorForMode::AllocatorIfExists); |
1837 | emitAllocateJSObject(resultGPR, JITAllocator::constant(allocator), scratchGPR1, structure, storage, scratchGPR2, slowPath); |
1838 | } |
1839 | |
1840 | template<typename ClassType, typename StructureType, typename StorageType> |
1841 | void emitAllocateJSObject(VM& vm, GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath) |
1842 | { |
1843 | emitAllocateJSObjectWithKnownSize<ClassType>(vm, resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath, ClassType::allocationSize(0)); |
1844 | } |
1845 | |
1846 | // allocationSize can be aliased with any of the other input GPRs. If it's not aliased then it |
1847 | // won't be clobbered. |
1848 | void emitAllocateVariableSized(GPRReg resultGPR, CompleteSubspace& subspace, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath); |
1849 | |
1850 | template<typename ClassType, typename StructureType> |
1851 | void emitAllocateVariableSizedCell(VM& vm, GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath) |
1852 | { |
1853 | CompleteSubspace* subspace = subspaceForConcurrently<ClassType>(vm); |
        RELEASE_ASSERT_WITH_MESSAGE(subspace, "CompleteSubspace is always allocated");
1855 | emitAllocateVariableSized(resultGPR, *subspace, allocationSize, scratchGPR1, scratchGPR2, slowPath); |
1856 | emitStoreStructureWithTypeInfo(structure, resultGPR, scratchGPR2); |
1857 | } |
1858 | |
1859 | template<typename ClassType, typename StructureType> |
1860 | void emitAllocateVariableSizedJSObject(VM& vm, GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath) |
1861 | { |
1862 | emitAllocateVariableSizedCell<ClassType>(vm, resultGPR, structure, allocationSize, scratchGPR1, scratchGPR2, slowPath); |
1863 | storePtr(TrustedImmPtr(nullptr), Address(resultGPR, JSObject::butterflyOffset())); |
1864 | } |
1865 | |
1866 | JumpList branchIfValue(VM&, JSValueRegs, GPRReg scratch, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg, FPRReg, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject*, bool negateResult); |
1867 | JumpList branchIfTruthy(VM& vm, JSValueRegs value, GPRReg scratch, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg scratchFPR0, FPRReg scratchFPR1, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject* globalObject) |
1868 | { |
1869 | return branchIfValue(vm, value, scratch, scratchIfShouldCheckMasqueradesAsUndefined, scratchFPR0, scratchFPR1, shouldCheckMasqueradesAsUndefined, globalObject, false); |
1870 | } |
1871 | JumpList branchIfFalsey(VM& vm, JSValueRegs value, GPRReg scratch, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg scratchFPR0, FPRReg scratchFPR1, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject* globalObject) |
1872 | { |
1873 | return branchIfValue(vm, value, scratch, scratchIfShouldCheckMasqueradesAsUndefined, scratchFPR0, scratchFPR1, shouldCheckMasqueradesAsUndefined, globalObject, true); |
1874 | } |
1875 | void emitConvertValueToBoolean(VM&, JSValueRegs, GPRReg result, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg, FPRReg, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject*, bool negateResult = false); |
1876 | |
1877 | template<typename ClassType> |
1878 | void emitAllocateDestructibleObject(VM& vm, GPRReg resultGPR, Structure* structure, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath) |
1879 | { |
1880 | auto butterfly = TrustedImmPtr(nullptr); |
1881 | emitAllocateJSObject<ClassType>(vm, resultGPR, TrustedImmPtr(structure), butterfly, scratchGPR1, scratchGPR2, slowPath); |
1882 | storePtr(TrustedImmPtr(structure->classInfo()), Address(resultGPR, JSDestructibleObject::classInfoOffset())); |
1883 | } |
1884 | |
1885 | void emitInitializeInlineStorage(GPRReg baseGPR, unsigned inlineCapacity) |
1886 | { |
1887 | for (unsigned i = 0; i < inlineCapacity; ++i) |
1888 | storeTrustedValue(JSValue(), Address(baseGPR, JSObject::offsetOfInlineStorage() + i * sizeof(EncodedJSValue))); |
1889 | } |
1890 | |
1891 | void emitInitializeInlineStorage(GPRReg baseGPR, GPRReg inlineCapacity) |
1892 | { |
1893 | Jump empty = branchTest32(Zero, inlineCapacity); |
1894 | Label loop = label(); |
1895 | sub32(TrustedImm32(1), inlineCapacity); |
1896 | storeTrustedValue(JSValue(), BaseIndex(baseGPR, inlineCapacity, TimesEight, JSObject::offsetOfInlineStorage())); |
1897 | branchTest32(NonZero, inlineCapacity).linkTo(loop, this); |
1898 | empty.link(this); |
1899 | } |
1900 | |
1901 | void emitInitializeOutOfLineStorage(GPRReg butterflyGPR, unsigned outOfLineCapacity) |
1902 | { |
1903 | for (unsigned i = 0; i < outOfLineCapacity; ++i) |
1904 | storeTrustedValue(JSValue(), Address(butterflyGPR, -sizeof(IndexingHeader) - (i + 1) * sizeof(EncodedJSValue))); |
1905 | } |
1906 | |
1907 | #if USE(JSVALUE64) |
1908 | void wangsInt64Hash(GPRReg inputAndResult, GPRReg scratch); |
1909 | #endif |
1910 | |
1911 | #if ENABLE(WEBASSEMBLY) |
1912 | void loadWasmContextInstance(GPRReg dst); |
1913 | void storeWasmContextInstance(GPRReg src); |
1914 | static bool loadWasmContextInstanceNeedsMacroScratchRegister(); |
1915 | static bool storeWasmContextInstanceNeedsMacroScratchRegister(); |
1916 | #endif |
1917 | |
1918 | protected: |
1919 | void copyCalleeSavesToEntryFrameCalleeSavesBufferImpl(GPRReg calleeSavesBuffer); |
1920 | |
1921 | CodeBlock* m_codeBlock; |
1922 | CodeBlock* m_baselineCodeBlock; |
1923 | }; |
1924 | |
1925 | } // namespace JSC |
1926 | |
1927 | #endif // ENABLE(JIT) |
1928 | |