/*
 * Copyright (C) 2017-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#include "CCallHelpers.h"
#include "CPU.h"
#include "FPRInfo.h"
#include "GPRInfo.h"
#include "InitializeThreading.h"
#include "LinkBuffer.h"
#include "ProbeContext.h"
#include "StackAlignment.h"
#include <limits>
#include <wtf/Compiler.h>
#include <wtf/DataLog.h>
#include <wtf/Function.h>
#include <wtf/Lock.h>
#include <wtf/NumberOfCores.h>
#include <wtf/PtrTag.h>
#include <wtf/Threading.h>
#include <wtf/text/StringCommon.h>

// We don't have a NO_RETURN_DUE_TO_EXIT, nor should we. That's ridiculous.
static bool hiddenTruthBecauseNoReturnIsStupid() { return true; }

static void usage()
{
    dataLog("Usage: testmasm [<filter>]\n");
    if (hiddenTruthBecauseNoReturnIsStupid())
        exit(1);
}

#if ENABLE(JIT)

#if ENABLE(MASM_PROBE)
namespace WTF {

static void printInternal(PrintStream& out, void* value)
{
    out.printf("%p", value);
}

} // namespace WTF
#endif // ENABLE(MASM_PROBE)

namespace JSC {
namespace Probe {

JS_EXPORT_PRIVATE void* probeStateForContext(Probe::Context&);

} // namespace Probe
} // namespace JSC

using namespace JSC;

namespace {

#if ENABLE(MASM_PROBE)
using CPUState = Probe::CPUState;
#endif

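// Held while reporting a failure (and around the final summary) so that output
// from concurrently running tests does not interleave.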
Lock crashLock;

typedef WTF::Function<void(CCallHelpers&)> Generator;

template<typename T> T nextID(T id) { return static_cast<T>(id + 1); }

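// Recognizable bit patterns written into registers and memory by the tests below:
// testWord(i) encodes the index i in the low bits of a fixed marker constant.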
#define TESTWORD64 0x0c0defefebeef000
#define TESTWORD32 0x0beef000

#define testWord32(x) (TESTWORD32 + static_cast<uint32_t>(x))
#define testWord64(x) (TESTWORD64 + static_cast<uint64_t>(x))

#if USE(JSVALUE64)
#define testWord(x) testWord64(x)
#else
#define testWord(x) testWord32(x)
#endif

// Nothing fancy for now; we just use the existing WTF assertion machinery.
#define CHECK_EQ(_actual, _expected) do { \
        if ((_actual) == (_expected)) \
            break; \
        crashLock.lock(); \
        dataLog("FAILED while testing " #_actual ": expected: ", _expected, ", actual: ", _actual, "\n"); \
        WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, "CHECK_EQ("#_actual ", " #_expected ")"); \
        CRASH(); \
    } while (false)

#define CHECK_NOT_EQ(_actual, _expected) do { \
        if ((_actual) != (_expected)) \
            break; \
        crashLock.lock(); \
        dataLog("FAILED while testing " #_actual ": expected not: ", _expected, ", actual: ", _actual, "\n"); \
        WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, "CHECK_NOT_EQ("#_actual ", " #_expected ")"); \
        CRASH(); \
    } while (false)

#if ENABLE(MASM_PROBE)
bool isPC(MacroAssembler::RegisterID id)
{
#if CPU(ARM_THUMB2)
    return id == ARMRegisters::pc;
#else
    UNUSED_PARAM(id);
    return false;
#endif
}

bool isSP(MacroAssembler::RegisterID id)
{
    return id == MacroAssembler::stackPointerRegister;
}

bool isFP(MacroAssembler::RegisterID id)
{
    return id == MacroAssembler::framePointerRegister;
}

bool isSpecialGPR(MacroAssembler::RegisterID id)
{
    if (isPC(id) || isSP(id) || isFP(id))
        return true;
#if CPU(ARM64)
    if (id == ARM64Registers::x18)
        return true;
#elif CPU(MIPS)
    if (id == MIPSRegisters::zero || id == MIPSRegisters::k0 || id == MIPSRegisters::k1)
        return true;
#endif
    return false;
}
#endif // ENABLE(MASM_PROBE)

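// Compiles the code emitted by the generator and returns it as executable code.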
MacroAssemblerCodeRef<JSEntryPtrTag> compile(Generator&& generate)
{
    CCallHelpers jit;
    generate(jit);
    LinkBuffer linkBuffer(jit, nullptr);
    return FINALIZE_CODE(linkBuffer, JSEntryPtrTag, "testmasm compilation");
}

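// Calls the compiled code as if it were a C function taking the given arguments,
// untagging the entry point first.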
template<typename T, typename... Arguments>
T invoke(const MacroAssemblerCodeRef<JSEntryPtrTag>& code, Arguments... arguments)
{
    void* executableAddress = untagCFunctionPtr<JSEntryPtrTag>(code.code().executableAddress());
    T (*function)(Arguments...) = bitwise_cast<T(*)(Arguments...)>(executableAddress);
    return function(arguments...);
}

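// Convenience helper: compile the generator and invoke the result immediately.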
template<typename T, typename... Arguments>
T compileAndRun(Generator&& generator, Arguments... arguments)
{
    return invoke<T>(compile(WTFMove(generator)), arguments...);
}

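// Prologue/epilogue helpers shared by all tests. On ARMv7 they additionally save
// and restore r6; see the comment inside emitFunctionPrologue().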
void emitFunctionPrologue(CCallHelpers& jit)
{
    jit.emitFunctionPrologue();
#if CPU(ARM_THUMB2)
    // MacroAssemblerARMv7 uses r6 as a temporary register, which is a
    // callee-saved register; see section 5.1.1 of the Procedure Call Standard
    // for the ARM Architecture.
    // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0042f/IHI0042F_aapcs.pdf
    jit.push(ARMRegisters::r6);
#endif
}

void emitFunctionEpilogue(CCallHelpers& jit)
{
#if CPU(ARM_THUMB2)
    jit.pop(ARMRegisters::r6);
#endif
    jit.emitFunctionEpilogue();
}

void testSimple()
{
    CHECK_EQ(compileAndRun<int>([] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);
        jit.move(CCallHelpers::TrustedImm32(42), GPRInfo::returnValueGPR);
        emitFunctionEpilogue(jit);
        jit.ret();
    }), 42);
}

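// The expected value below is the effective address computation itself:
// base + offset + (index << scale).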
void testGetEffectiveAddress(size_t pointer, ptrdiff_t length, int32_t offset, CCallHelpers::Scale scale)
{
    CHECK_EQ(compileAndRun<size_t>([=] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);
        jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(pointer)), GPRInfo::regT0);
        jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(length)), GPRInfo::regT1);
        jit.getEffectiveAddress(CCallHelpers::BaseIndex(GPRInfo::regT0, GPRInfo::regT1, scale, offset), GPRInfo::returnValueGPR);
        emitFunctionEpilogue(jit);
        jit.ret();
    }), pointer + offset + (static_cast<size_t>(1) << static_cast<int>(scale)) * length);
}

// branchTruncateDoubleToInt32(), when encountering Infinity, -Infinity or a
// NaN, should either yield 0 in dest or fail.
void testBranchTruncateDoubleToInt32(double val, int32_t expected)
{
    const uint64_t valAsUInt = bitwise_cast<uint64_t>(val);
#if CPU(BIG_ENDIAN)
    const bool isBigEndian = true;
#else
    const bool isBigEndian = false;
#endif
    CHECK_EQ(compileAndRun<int>([&] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);
        jit.subPtr(CCallHelpers::TrustedImm32(stackAlignmentBytes()), MacroAssembler::stackPointerRegister);
        if (isBigEndian) {
            jit.store32(CCallHelpers::TrustedImm32(valAsUInt >> 32),
                MacroAssembler::stackPointerRegister);
            jit.store32(CCallHelpers::TrustedImm32(valAsUInt & 0xffffffff),
                MacroAssembler::Address(MacroAssembler::stackPointerRegister, 4));
        } else {
            jit.store32(CCallHelpers::TrustedImm32(valAsUInt & 0xffffffff),
                MacroAssembler::stackPointerRegister);
            jit.store32(CCallHelpers::TrustedImm32(valAsUInt >> 32),
                MacroAssembler::Address(MacroAssembler::stackPointerRegister, 4));
        }
        jit.loadDouble(MacroAssembler::stackPointerRegister, FPRInfo::fpRegT0);

        MacroAssembler::Jump done;
        done = jit.branchTruncateDoubleToInt32(FPRInfo::fpRegT0, GPRInfo::returnValueGPR, MacroAssembler::BranchIfTruncateSuccessful);

        jit.move(CCallHelpers::TrustedImm32(0), GPRInfo::returnValueGPR);

        done.link(&jit);
        jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentBytes()), MacroAssembler::stackPointerRegister);
        emitFunctionEpilogue(jit);
        jit.ret();
    }), expected);
}


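// Operand sets used to drive the comparison and arithmetic tests below.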
static Vector<double> doubleOperands()
{
    return Vector<double> {
        0,
        -0,
        1,
        -1,
        42,
        -42,
        std::numeric_limits<double>::max(),
        std::numeric_limits<double>::min(),
        std::numeric_limits<double>::lowest(),
        std::numeric_limits<double>::quiet_NaN(),
        std::numeric_limits<double>::infinity(),
        -std::numeric_limits<double>::infinity(),
    };
}


#if CPU(X86) || CPU(X86_64) || CPU(ARM64)
static Vector<float> floatOperands()
{
    return Vector<float> {
        0,
        -0,
        1,
        -1,
        42,
        -42,
        std::numeric_limits<float>::max(),
        std::numeric_limits<float>::min(),
        std::numeric_limits<float>::lowest(),
        std::numeric_limits<float>::quiet_NaN(),
        std::numeric_limits<float>::infinity(),
        -std::numeric_limits<float>::infinity(),
    };
}
#endif

static Vector<int32_t> int32Operands()
{
    return Vector<int32_t> {
        0,
        1,
        -1,
        2,
        -2,
        42,
        -42,
        64,
        std::numeric_limits<int32_t>::max(),
        std::numeric_limits<int32_t>::min(),
    };
}

#if CPU(X86_64)
static Vector<int64_t> int64Operands()
{
    return Vector<int64_t> {
        0,
        1,
        -1,
        2,
        -2,
        42,
        -42,
        64,
        std::numeric_limits<int32_t>::max(),
        std::numeric_limits<int32_t>::min(),
        std::numeric_limits<int64_t>::max(),
        std::numeric_limits<int64_t>::min(),
    };
}
#endif

#if CPU(X86_64)
void testBranchTestBit32RegReg()
{
    for (auto value : int32Operands()) {
        auto test = compile([=] (CCallHelpers& jit) {
            emitFunctionPrologue(jit);

            auto branch = jit.branchTestBit32(MacroAssembler::NonZero, GPRInfo::argumentGPR0, GPRInfo::argumentGPR1);
            jit.move(CCallHelpers::TrustedImm32(0), GPRInfo::returnValueGPR);
            auto done = jit.jump();
            branch.link(&jit);
            jit.move(CCallHelpers::TrustedImm32(1), GPRInfo::returnValueGPR);
            done.link(&jit);

            emitFunctionEpilogue(jit);
            jit.ret();
        });

        for (auto value2 : int32Operands())
            CHECK_EQ(invoke<int>(test, value, value2), (value>>(value2%32))&1);
    }
}

void testBranchTestBit32RegImm()
{
    for (auto value : int32Operands()) {
        auto test = compile([=] (CCallHelpers& jit) {
            emitFunctionPrologue(jit);

            auto branch = jit.branchTestBit32(MacroAssembler::NonZero, GPRInfo::argumentGPR0, CCallHelpers::TrustedImm32(value));
            jit.move(CCallHelpers::TrustedImm32(0), GPRInfo::returnValueGPR);
            auto done = jit.jump();
            branch.link(&jit);
            jit.move(CCallHelpers::TrustedImm32(1), GPRInfo::returnValueGPR);
            done.link(&jit);

            emitFunctionEpilogue(jit);
            jit.ret();
        });

        for (auto value2 : int32Operands())
            CHECK_EQ(invoke<int>(test, value2), (value2>>(value%32))&1);
    }
}

void testBranchTestBit32AddrImm()
{
    for (auto value : int32Operands()) {
        auto test = compile([=] (CCallHelpers& jit) {
            emitFunctionPrologue(jit);

            auto branch = jit.branchTestBit32(MacroAssembler::NonZero, MacroAssembler::Address(GPRInfo::argumentGPR0, 0), CCallHelpers::TrustedImm32(value));
            jit.move(CCallHelpers::TrustedImm32(0), GPRInfo::returnValueGPR);
            auto done = jit.jump();
            branch.link(&jit);
            jit.move(CCallHelpers::TrustedImm32(1), GPRInfo::returnValueGPR);
            done.link(&jit);

            emitFunctionEpilogue(jit);
            jit.ret();
        });

        for (auto value2 : int32Operands())
            CHECK_EQ(invoke<int>(test, &value2), (value2>>(value%32))&1);
    }
}

void testBranchTestBit64RegReg()
{
    for (auto value : int64Operands()) {
        auto test = compile([=] (CCallHelpers& jit) {
            emitFunctionPrologue(jit);

            auto branch = jit.branchTestBit64(MacroAssembler::NonZero, GPRInfo::argumentGPR0, GPRInfo::argumentGPR1);
            jit.move(CCallHelpers::TrustedImm64(0), GPRInfo::returnValueGPR);
            auto done = jit.jump();
            branch.link(&jit);
            jit.move(CCallHelpers::TrustedImm64(1), GPRInfo::returnValueGPR);
            done.link(&jit);

            emitFunctionEpilogue(jit);
            jit.ret();
        });

        for (auto value2 : int64Operands())
            CHECK_EQ(invoke<long int>(test, value, value2), (value>>(value2%64))&1);
    }
}

void testBranchTestBit64RegImm()
{
    for (auto value : int64Operands()) {
        auto test = compile([=] (CCallHelpers& jit) {
            emitFunctionPrologue(jit);

            auto branch = jit.branchTestBit64(MacroAssembler::NonZero, GPRInfo::argumentGPR0, CCallHelpers::TrustedImm32(value));
            jit.move(CCallHelpers::TrustedImm64(0), GPRInfo::returnValueGPR);
            auto done = jit.jump();
            branch.link(&jit);
            jit.move(CCallHelpers::TrustedImm64(1), GPRInfo::returnValueGPR);
            done.link(&jit);

            emitFunctionEpilogue(jit);
            jit.ret();
        });

        for (auto value2 : int64Operands())
            CHECK_EQ(invoke<long int>(test, value2), (value2>>(value%64))&1);
    }
}

void testBranchTestBit64AddrImm()
{
    for (auto value : int64Operands()) {
        auto test = compile([=] (CCallHelpers& jit) {
            emitFunctionPrologue(jit);

            auto branch = jit.branchTestBit64(MacroAssembler::NonZero, MacroAssembler::Address(GPRInfo::argumentGPR0, 0), CCallHelpers::TrustedImm32(value));
            jit.move(CCallHelpers::TrustedImm64(0), GPRInfo::returnValueGPR);
            auto done = jit.jump();
            branch.link(&jit);
            jit.move(CCallHelpers::TrustedImm64(1), GPRInfo::returnValueGPR);
            done.link(&jit);

            emitFunctionEpilogue(jit);
            jit.ret();
        });

        for (auto value2 : int64Operands())
            CHECK_EQ(invoke<long int>(test, &value2), (value2>>(value%64))&1);
    }
}

#endif

void testCompareDouble(MacroAssembler::DoubleCondition condition)
{
    double arg1 = 0;
    double arg2 = 0;

    auto compareDouble = compile([&, condition] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);

        jit.loadDouble(CCallHelpers::TrustedImmPtr(&arg1), FPRInfo::fpRegT0);
        jit.loadDouble(CCallHelpers::TrustedImmPtr(&arg2), FPRInfo::fpRegT1);
        jit.move(CCallHelpers::TrustedImm32(-1), GPRInfo::returnValueGPR);
        jit.compareDouble(condition, FPRInfo::fpRegT0, FPRInfo::fpRegT1, GPRInfo::returnValueGPR);

        emitFunctionEpilogue(jit);
        jit.ret();
    });

    auto compareDoubleGeneric = compile([&, condition] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);

        jit.loadDouble(CCallHelpers::TrustedImmPtr(&arg1), FPRInfo::fpRegT0);
        jit.loadDouble(CCallHelpers::TrustedImmPtr(&arg2), FPRInfo::fpRegT1);
        jit.move(CCallHelpers::TrustedImm32(1), GPRInfo::returnValueGPR);
        auto jump = jit.branchDouble(condition, FPRInfo::fpRegT0, FPRInfo::fpRegT1);
        jit.move(CCallHelpers::TrustedImm32(0), GPRInfo::returnValueGPR);
        jump.link(&jit);

        emitFunctionEpilogue(jit);
        jit.ret();
    });

    auto operands = doubleOperands();
    for (auto a : operands) {
        for (auto b : operands) {
            arg1 = a;
            arg2 = b;
            CHECK_EQ(invoke<int>(compareDouble), invoke<int>(compareDoubleGeneric));
        }
    }
}

void testMul32WithImmediates()
{
    for (auto immediate : int32Operands()) {
        auto mul = compile([=] (CCallHelpers& jit) {
            emitFunctionPrologue(jit);

            jit.mul32(CCallHelpers::TrustedImm32(immediate), GPRInfo::argumentGPR0, GPRInfo::returnValueGPR);

            emitFunctionEpilogue(jit);
            jit.ret();
        });

        for (auto value : int32Operands())
            CHECK_EQ(invoke<int>(mul, value), immediate * value);
    }
}

#if CPU(ARM64)
void testMul32SignExtend()
{
    for (auto value : int32Operands()) {
        auto mul = compile([=] (CCallHelpers& jit) {
            emitFunctionPrologue(jit);

            jit.multiplySignExtend32(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::returnValueGPR);

            emitFunctionEpilogue(jit);
            jit.ret();
        });

        for (auto value2 : int32Operands())
            CHECK_EQ(invoke<long int>(mul, value, value2), ((long int) value) * ((long int) value2));
    }
}
#endif

#if CPU(X86) || CPU(X86_64) || CPU(ARM64)
void testCompareFloat(MacroAssembler::DoubleCondition condition)
{
    float arg1 = 0;
    float arg2 = 0;

    auto compareFloat = compile([&, condition] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);

        jit.loadFloat(CCallHelpers::TrustedImmPtr(&arg1), FPRInfo::fpRegT0);
        jit.loadFloat(CCallHelpers::TrustedImmPtr(&arg2), FPRInfo::fpRegT1);
        jit.move(CCallHelpers::TrustedImm32(-1), GPRInfo::returnValueGPR);
        jit.compareFloat(condition, FPRInfo::fpRegT0, FPRInfo::fpRegT1, GPRInfo::returnValueGPR);

        emitFunctionEpilogue(jit);
        jit.ret();
    });

    auto compareFloatGeneric = compile([&, condition] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);

        jit.loadFloat(CCallHelpers::TrustedImmPtr(&arg1), FPRInfo::fpRegT0);
        jit.loadFloat(CCallHelpers::TrustedImmPtr(&arg2), FPRInfo::fpRegT1);
        jit.move(CCallHelpers::TrustedImm32(1), GPRInfo::returnValueGPR);
        auto jump = jit.branchFloat(condition, FPRInfo::fpRegT0, FPRInfo::fpRegT1);
        jit.move(CCallHelpers::TrustedImm32(0), GPRInfo::returnValueGPR);
        jump.link(&jit);

        emitFunctionEpilogue(jit);
        jit.ret();
    });

    auto operands = floatOperands();
    for (auto a : operands) {
        for (auto b : operands) {
            arg1 = a;
            arg2 = b;
            CHECK_EQ(invoke<int>(compareFloat), invoke<int>(compareFloatGeneric));
        }
    }
}
#endif

#if ENABLE(MASM_PROBE)
void testProbeReadsArgumentRegisters()
{
    bool probeWasCalled = false;
    compileAndRun<void>([&] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);

        jit.pushPair(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1);
        jit.pushPair(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3);

        jit.move(CCallHelpers::TrustedImm32(testWord32(0)), GPRInfo::argumentGPR0);
        jit.convertInt32ToDouble(GPRInfo::argumentGPR0, FPRInfo::fpRegT0);
        jit.move(CCallHelpers::TrustedImm32(testWord32(1)), GPRInfo::argumentGPR0);
        jit.convertInt32ToDouble(GPRInfo::argumentGPR0, FPRInfo::fpRegT1);
#if USE(JSVALUE64)
        jit.move(CCallHelpers::TrustedImm64(testWord(0)), GPRInfo::argumentGPR0);
        jit.move(CCallHelpers::TrustedImm64(testWord(1)), GPRInfo::argumentGPR1);
        jit.move(CCallHelpers::TrustedImm64(testWord(2)), GPRInfo::argumentGPR2);
        jit.move(CCallHelpers::TrustedImm64(testWord(3)), GPRInfo::argumentGPR3);
#else
        jit.move(CCallHelpers::TrustedImm32(testWord(0)), GPRInfo::argumentGPR0);
        jit.move(CCallHelpers::TrustedImm32(testWord(1)), GPRInfo::argumentGPR1);
        jit.move(CCallHelpers::TrustedImm32(testWord(2)), GPRInfo::argumentGPR2);
        jit.move(CCallHelpers::TrustedImm32(testWord(3)), GPRInfo::argumentGPR3);
#endif

        jit.probe([&] (Probe::Context& context) {
            auto& cpu = context.cpu;
            probeWasCalled = true;
            CHECK_EQ(cpu.gpr(GPRInfo::argumentGPR0), testWord(0));
            CHECK_EQ(cpu.gpr(GPRInfo::argumentGPR1), testWord(1));
            CHECK_EQ(cpu.gpr(GPRInfo::argumentGPR2), testWord(2));
            CHECK_EQ(cpu.gpr(GPRInfo::argumentGPR3), testWord(3));

            CHECK_EQ(cpu.fpr(FPRInfo::fpRegT0), testWord32(0));
            CHECK_EQ(cpu.fpr(FPRInfo::fpRegT1), testWord32(1));
        });

        jit.popPair(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3);
        jit.popPair(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1);

        emitFunctionEpilogue(jit);
        jit.ret();
    });
    CHECK_EQ(probeWasCalled, true);
}

void testProbeWritesArgumentRegisters()
{
    // This test relies on testProbeReadsArgumentRegisters() having already validated
    // that we can read from argument registers. We'll use that ability to validate
    // that our writes did take effect.
    unsigned probeCallCount = 0;
    compileAndRun<void>([&] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);

        jit.pushPair(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1);
        jit.pushPair(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3);

        // Pre-initialize with non-expected values.
#if USE(JSVALUE64)
        jit.move(CCallHelpers::TrustedImm64(0), GPRInfo::argumentGPR0);
        jit.move(CCallHelpers::TrustedImm64(0), GPRInfo::argumentGPR1);
        jit.move(CCallHelpers::TrustedImm64(0), GPRInfo::argumentGPR2);
        jit.move(CCallHelpers::TrustedImm64(0), GPRInfo::argumentGPR3);
#else
        jit.move(CCallHelpers::TrustedImm32(0), GPRInfo::argumentGPR0);
        jit.move(CCallHelpers::TrustedImm32(0), GPRInfo::argumentGPR1);
        jit.move(CCallHelpers::TrustedImm32(0), GPRInfo::argumentGPR2);
        jit.move(CCallHelpers::TrustedImm32(0), GPRInfo::argumentGPR3);
#endif
        jit.convertInt32ToDouble(GPRInfo::argumentGPR0, FPRInfo::fpRegT0);
        jit.convertInt32ToDouble(GPRInfo::argumentGPR0, FPRInfo::fpRegT1);

        // Write expected values.
        jit.probe([&] (Probe::Context& context) {
            auto& cpu = context.cpu;
            probeCallCount++;
            cpu.gpr(GPRInfo::argumentGPR0) = testWord(0);
            cpu.gpr(GPRInfo::argumentGPR1) = testWord(1);
            cpu.gpr(GPRInfo::argumentGPR2) = testWord(2);
            cpu.gpr(GPRInfo::argumentGPR3) = testWord(3);

            cpu.fpr(FPRInfo::fpRegT0) = bitwise_cast<double>(testWord64(0));
            cpu.fpr(FPRInfo::fpRegT1) = bitwise_cast<double>(testWord64(1));
        });

        // Validate that expected values were written.
        jit.probe([&] (Probe::Context& context) {
            auto& cpu = context.cpu;
            probeCallCount++;
            CHECK_EQ(cpu.gpr(GPRInfo::argumentGPR0), testWord(0));
            CHECK_EQ(cpu.gpr(GPRInfo::argumentGPR1), testWord(1));
            CHECK_EQ(cpu.gpr(GPRInfo::argumentGPR2), testWord(2));
            CHECK_EQ(cpu.gpr(GPRInfo::argumentGPR3), testWord(3));

            CHECK_EQ(cpu.fpr<uint64_t>(FPRInfo::fpRegT0), testWord64(0));
            CHECK_EQ(cpu.fpr<uint64_t>(FPRInfo::fpRegT1), testWord64(1));
        });

        jit.popPair(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3);
        jit.popPair(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1);

        emitFunctionEpilogue(jit);
        jit.ret();
    });
    CHECK_EQ(probeCallCount, 2);
}

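// Recursive, non-inlined helpers used by the probe tests to clobber as many
// GPRs and FPRs as possible between probes.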
static NEVER_INLINE NOT_TAIL_CALLED int testFunctionToTrashGPRs(int a, int b, int c, int d, int e, int f, int g, int h, int i, int j)
{
    if (j > 0)
        return testFunctionToTrashGPRs(a + 1, b + a, c + b, d + 5, e - a, f * 1.5, g ^ a, h - b, i, j - 1);
    return a + 1;
}
static NEVER_INLINE NOT_TAIL_CALLED double testFunctionToTrashFPRs(double a, double b, double c, double d, double e, double f, double g, double h, double i, double j)
{
    if (j > 0)
        return testFunctionToTrashFPRs(a + 1, b + a, c + b, d + 5, e - a, f * 1.5, pow(g, a), h - b, i, j - 1);
    return a + 1;
}

void testProbePreservesGPRS()
{
    // This test relies on testProbeReadsArgumentRegisters() and testProbeWritesArgumentRegisters()
    // having already validated that we can read and write from registers. We'll use these abilities
    // to validate that the probe preserves register values.
    unsigned probeCallCount = 0;
    CPUState originalState;

    compileAndRun<void>([&] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);

        // Write expected values into the registers (except for sp, fp, and pc).
        jit.probe([&] (Probe::Context& context) {
            auto& cpu = context.cpu;
            probeCallCount++;
            for (auto id = CCallHelpers::firstRegister(); id <= CCallHelpers::lastRegister(); id = nextID(id)) {
                originalState.gpr(id) = cpu.gpr(id);
                if (isSpecialGPR(id))
                    continue;
                cpu.gpr(id) = testWord(static_cast<int>(id));
            }
            for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id)) {
                originalState.fpr(id) = cpu.fpr(id);
                cpu.fpr(id) = bitwise_cast<double>(testWord64(id));
            }
        });

        // Invoke the probe to call a lot of functions and trash register values.
        jit.probe([&] (Probe::Context&) {
            probeCallCount++;
            CHECK_EQ(testFunctionToTrashGPRs(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), 10);
            CHECK_EQ(testFunctionToTrashFPRs(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), 10);
        });

        // Validate that the registers have the expected values.
        jit.probe([&] (Probe::Context& context) {
            auto& cpu = context.cpu;
            probeCallCount++;
            for (auto id = CCallHelpers::firstRegister(); id <= CCallHelpers::lastRegister(); id = nextID(id)) {
                if (isSP(id) || isFP(id)) {
                    CHECK_EQ(cpu.gpr(id), originalState.gpr(id));
                    continue;
                }
                if (isSpecialGPR(id))
                    continue;
                CHECK_EQ(cpu.gpr(id), testWord(id));
            }
            for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id))
#if CPU(MIPS)
                if (!(id & 1))
#endif
                CHECK_EQ(cpu.fpr<uint64_t>(id), testWord64(id));
        });

        // Restore the original state.
        jit.probe([&] (Probe::Context& context) {
            auto& cpu = context.cpu;
            probeCallCount++;
            for (auto id = CCallHelpers::firstRegister(); id <= CCallHelpers::lastRegister(); id = nextID(id)) {
                if (isSpecialGPR(id))
                    continue;
                cpu.gpr(id) = originalState.gpr(id);
            }
            for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id))
                cpu.fpr(id) = originalState.fpr(id);
        });

        // Validate that the original state was restored.
        jit.probe([&] (Probe::Context& context) {
            auto& cpu = context.cpu;
            probeCallCount++;
            for (auto id = CCallHelpers::firstRegister(); id <= CCallHelpers::lastRegister(); id = nextID(id)) {
                if (isSpecialGPR(id))
                    continue;
                CHECK_EQ(cpu.gpr(id), originalState.gpr(id));
            }
            for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id))
#if CPU(MIPS)
                if (!(id & 1))
#endif
                CHECK_EQ(cpu.fpr<uint64_t>(id), originalState.fpr<uint64_t>(id));
        });

        emitFunctionEpilogue(jit);
        jit.ret();
    });
    CHECK_EQ(probeCallCount, 5);
}

void testProbeModifiesStackPointer(WTF::Function<void*(Probe::Context&)> computeModifiedStackPointer)
{
    unsigned probeCallCount = 0;
    CPUState originalState;
    void* originalSP { nullptr };
    void* modifiedSP { nullptr };
#if !(CPU(MIPS))
    uintptr_t modifiedFlags { 0 };
#endif

#if CPU(X86) || CPU(X86_64)
    auto flagsSPR = X86Registers::eflags;
    uintptr_t flagsMask = 0xc5;
#elif CPU(ARM_THUMB2)
    auto flagsSPR = ARMRegisters::apsr;
    uintptr_t flagsMask = 0xf8000000;
#elif CPU(ARM64)
    auto flagsSPR = ARM64Registers::nzcv;
    uintptr_t flagsMask = 0xf0000000;
#endif

    compileAndRun<void>([&] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);

        // Preserve the original stack pointer, modify the sp, and
        // write expected values into the other registers (except for fp and pc).
        jit.probe([&] (Probe::Context& context) {
            auto& cpu = context.cpu;
            probeCallCount++;
            for (auto id = CCallHelpers::firstRegister(); id <= CCallHelpers::lastRegister(); id = nextID(id)) {
                originalState.gpr(id) = cpu.gpr(id);
                if (isSpecialGPR(id))
                    continue;
                cpu.gpr(id) = testWord(static_cast<int>(id));
            }
            for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id)) {
                originalState.fpr(id) = cpu.fpr(id);
                cpu.fpr(id) = bitwise_cast<double>(testWord64(id));
            }

#if !(CPU(MIPS))
            originalState.spr(flagsSPR) = cpu.spr(flagsSPR);
            modifiedFlags = originalState.spr(flagsSPR) ^ flagsMask;
            cpu.spr(flagsSPR) = modifiedFlags;
#endif

            originalSP = cpu.sp();
            modifiedSP = computeModifiedStackPointer(context);
            cpu.sp() = modifiedSP;
        });

        // Validate that the registers have the expected values.
        jit.probe([&] (Probe::Context& context) {
            auto& cpu = context.cpu;
            probeCallCount++;
            for (auto id = CCallHelpers::firstRegister(); id <= CCallHelpers::lastRegister(); id = nextID(id)) {
                if (isFP(id)) {
                    CHECK_EQ(cpu.gpr(id), originalState.gpr(id));
                    continue;
                }
                if (isSpecialGPR(id))
                    continue;
                CHECK_EQ(cpu.gpr(id), testWord(id));
            }
            for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id))
#if CPU(MIPS)
                if (!(id & 1))
#endif
                CHECK_EQ(cpu.fpr<uint64_t>(id), testWord64(id));
#if !(CPU(MIPS))
            CHECK_EQ(cpu.spr(flagsSPR) & flagsMask, modifiedFlags & flagsMask);
#endif
            CHECK_EQ(cpu.sp(), modifiedSP);
        });

        // Restore the original state.
        jit.probe([&] (Probe::Context& context) {
            auto& cpu = context.cpu;
            probeCallCount++;
            for (auto id = CCallHelpers::firstRegister(); id <= CCallHelpers::lastRegister(); id = nextID(id)) {
                if (isSpecialGPR(id))
                    continue;
                cpu.gpr(id) = originalState.gpr(id);
            }
            for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id))
                cpu.fpr(id) = originalState.fpr(id);
#if !(CPU(MIPS))
            cpu.spr(flagsSPR) = originalState.spr(flagsSPR);
#endif
            cpu.sp() = originalSP;
        });

        // Validate that the original state was restored.
        jit.probe([&] (Probe::Context& context) {
            auto& cpu = context.cpu;
            probeCallCount++;
            for (auto id = CCallHelpers::firstRegister(); id <= CCallHelpers::lastRegister(); id = nextID(id)) {
                if (isSpecialGPR(id))
                    continue;
                CHECK_EQ(cpu.gpr(id), originalState.gpr(id));
            }
            for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id))
#if CPU(MIPS)
                if (!(id & 1))
#endif
                CHECK_EQ(cpu.fpr<uint64_t>(id), originalState.fpr<uint64_t>(id));
#if !(CPU(MIPS))
            CHECK_EQ(cpu.spr(flagsSPR) & flagsMask, originalState.spr(flagsSPR) & flagsMask);
#endif
            CHECK_EQ(cpu.sp(), originalSP);
        });

        emitFunctionEpilogue(jit);
        jit.ret();
    });
    CHECK_EQ(probeCallCount, 4);
}

void testProbeModifiesStackPointerToInsideProbeStateOnStack()
{
    size_t increment = sizeof(uintptr_t);
#if CPU(ARM64)
    // The ARM64 probe uses ldp and stp which require 16 byte alignment.
    increment = 2 * sizeof(uintptr_t);
#endif
    for (size_t offset = 0; offset < sizeof(Probe::State); offset += increment) {
        testProbeModifiesStackPointer([=] (Probe::Context& context) -> void* {
            return reinterpret_cast<uint8_t*>(probeStateForContext(context)) + offset;
        });
    }
}

void testProbeModifiesStackPointerToNBytesBelowSP()
{
    size_t increment = sizeof(uintptr_t);
#if CPU(ARM64)
    // The ARM64 probe uses ldp and stp which require 16 byte alignment.
    increment = 2 * sizeof(uintptr_t);
#endif
    for (size_t offset = 0; offset < 1 * KB; offset += increment) {
        testProbeModifiesStackPointer([=] (Probe::Context& context) -> void* {
            return context.cpu.sp<uint8_t*>() - offset;
        });
    }
}

void testProbeModifiesProgramCounter()
{
    // This test relies on testProbeReadsArgumentRegisters() and testProbeWritesArgumentRegisters()
    // having already validated that we can read and write from registers. We'll use these abilities
    // to validate that the probe can redirect execution by modifying the program counter.
    unsigned probeCallCount = 0;
    bool continuationWasReached = false;

    MacroAssemblerCodeRef<JSEntryPtrTag> continuation = compile([&] (CCallHelpers& jit) {
        // Validate that we reached the continuation.
        jit.probe([&] (Probe::Context&) {
            probeCallCount++;
            continuationWasReached = true;
        });

        emitFunctionEpilogue(jit);
        jit.ret();
    });

    compileAndRun<void>([&] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);

        // Redirect execution to the continuation by rewriting the program counter.
        jit.probe([&] (Probe::Context& context) {
            probeCallCount++;
            context.cpu.pc() = untagCodePtr(continuation.code().executableAddress(), JSEntryPtrTag);
        });

        jit.breakpoint(); // We should never get here.
    });
    CHECK_EQ(probeCallCount, 2);
    CHECK_EQ(continuationWasReached, true);
}

void testProbeModifiesStackValues()
{
    unsigned probeCallCount = 0;
    CPUState originalState;
    void* originalSP { nullptr };
    void* newSP { nullptr };
#if !CPU(MIPS)
    uintptr_t modifiedFlags { 0 };
#endif
    size_t numberOfExtraEntriesToWrite { 10 }; // ARM64 requires that this be 2 word aligned.

#if CPU(X86) || CPU(X86_64)
    MacroAssembler::SPRegisterID flagsSPR = X86Registers::eflags;
    uintptr_t flagsMask = 0xc5;
#elif CPU(ARM_THUMB2)
    MacroAssembler::SPRegisterID flagsSPR = ARMRegisters::apsr;
    uintptr_t flagsMask = 0xf8000000;
#elif CPU(ARM64)
    MacroAssembler::SPRegisterID flagsSPR = ARM64Registers::nzcv;
    uintptr_t flagsMask = 0xf0000000;
#endif

    compileAndRun<void>([&] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);

        // Write expected values into the registers and onto the stack.
        jit.probe([&] (Probe::Context& context) {
            auto& cpu = context.cpu;
            auto& stack = context.stack();
            probeCallCount++;

            // Preserve the original CPU state.
            for (auto id = CCallHelpers::firstRegister(); id <= CCallHelpers::lastRegister(); id = nextID(id)) {
                originalState.gpr(id) = cpu.gpr(id);
                if (isSpecialGPR(id))
                    continue;
                cpu.gpr(id) = testWord(static_cast<int>(id));
            }
            for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id)) {
                originalState.fpr(id) = cpu.fpr(id);
                cpu.fpr(id) = bitwise_cast<double>(testWord64(id));
            }
#if !(CPU(MIPS))
            originalState.spr(flagsSPR) = cpu.spr(flagsSPR);
            modifiedFlags = originalState.spr(flagsSPR) ^ flagsMask;
            cpu.spr(flagsSPR) = modifiedFlags;
#endif

            // Ensure that we'll be writing over the regions of the stack where the Probe::State is.
            originalSP = cpu.sp();
            newSP = reinterpret_cast<uintptr_t*>(probeStateForContext(context)) - numberOfExtraEntriesToWrite;
            cpu.sp() = newSP;

            // Fill the stack with values.
            uintptr_t* p = reinterpret_cast<uintptr_t*>(newSP);
            int count = 0;
            stack.set<double>(p++, 1.234567);
            if (is32Bit())
                p++; // On 32-bit targets, a double takes up 2 uintptr_t.
            while (p < reinterpret_cast<uintptr_t*>(originalSP))
                stack.set<uintptr_t>(p++, testWord(count++));
        });

        // Validate that the registers and stack have the expected values.
        jit.probe([&] (Probe::Context& context) {
            auto& cpu = context.cpu;
            auto& stack = context.stack();
            probeCallCount++;

            // Validate the register values.
            for (auto id = CCallHelpers::firstRegister(); id <= CCallHelpers::lastRegister(); id = nextID(id)) {
                if (isFP(id)) {
                    CHECK_EQ(cpu.gpr(id), originalState.gpr(id));
                    continue;
                }
                if (isSpecialGPR(id))
                    continue;
                CHECK_EQ(cpu.gpr(id), testWord(id));
            }
            for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id))
#if CPU(MIPS)
                if (!(id & 1))
#endif
                CHECK_EQ(cpu.fpr<uint64_t>(id), testWord64(id));
#if !(CPU(MIPS))
            CHECK_EQ(cpu.spr(flagsSPR) & flagsMask, modifiedFlags & flagsMask);
#endif
            CHECK_EQ(cpu.sp(), newSP);

            // Validate the stack values.
            uintptr_t* p = reinterpret_cast<uintptr_t*>(newSP);
            int count = 0;
            CHECK_EQ(stack.get<double>(p++), 1.234567);
            if (is32Bit())
                p++; // On 32-bit targets, a double takes up 2 uintptr_t.
            while (p < reinterpret_cast<uintptr_t*>(originalSP))
                CHECK_EQ(stack.get<uintptr_t>(p++), testWord(count++));
        });

        // Restore the original state.
        jit.probe([&] (Probe::Context& context) {
            auto& cpu = context.cpu;
            probeCallCount++;
            for (auto id = CCallHelpers::firstRegister(); id <= CCallHelpers::lastRegister(); id = nextID(id)) {
                if (isSpecialGPR(id))
                    continue;
                cpu.gpr(id) = originalState.gpr(id);
            }
            for (auto id = CCallHelpers::firstFPRegister(); id <= CCallHelpers::lastFPRegister(); id = nextID(id))
                cpu.fpr(id) = originalState.fpr(id);
#if !(CPU(MIPS))
            cpu.spr(flagsSPR) = originalState.spr(flagsSPR);
#endif
            cpu.sp() = originalSP;
        });

        emitFunctionEpilogue(jit);
        jit.ret();
    });

    CHECK_EQ(probeCallCount, 3);
}
#endif // ENABLE(MASM_PROBE)

void testOrImmMem()
{
    // FIXME: this does not test that the or does not touch memory beyond its width.
    // I am not sure how to do such a test without a lot of complexity (e.g. running multiple threads, with a race on the high bits of the memory location).
    uint64_t memoryLocation = 0x12341234;
    auto or32 = compile([&] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);
        jit.or32(CCallHelpers::TrustedImm32(42), CCallHelpers::AbsoluteAddress(&memoryLocation));
        emitFunctionEpilogue(jit);
        jit.ret();
    });
    invoke<void>(or32);
    CHECK_EQ(memoryLocation, 0x12341234 | 42);

    memoryLocation = 0x12341234;
    auto or16 = compile([&] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);
        jit.or16(CCallHelpers::TrustedImm32(42), CCallHelpers::AbsoluteAddress(&memoryLocation));
        emitFunctionEpilogue(jit);
        jit.ret();
    });
    invoke<void>(or16);
    CHECK_EQ(memoryLocation, 0x12341234 | 42);

    memoryLocation = 0x12341234;
    auto or16InvalidLogicalImmInARM64 = compile([&] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);
        jit.or16(CCallHelpers::TrustedImm32(0), CCallHelpers::AbsoluteAddress(&memoryLocation));
        emitFunctionEpilogue(jit);
        jit.ret();
    });
    invoke<void>(or16InvalidLogicalImmInARM64);
    CHECK_EQ(memoryLocation, 0x12341234);
}

void testByteSwap()
{
#if CPU(X86_64) || CPU(ARM64)
    auto byteSwap16 = compile([] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);
        jit.move(GPRInfo::argumentGPR0, GPRInfo::returnValueGPR);
        jit.byteSwap16(GPRInfo::returnValueGPR);
        emitFunctionEpilogue(jit);
        jit.ret();
    });
    CHECK_EQ(invoke<uint64_t>(byteSwap16, 0xaabbccddee001122), static_cast<uint64_t>(0x2211));
    CHECK_EQ(invoke<uint64_t>(byteSwap16, 0xaabbccddee00ffaa), static_cast<uint64_t>(0xaaff));

    auto byteSwap32 = compile([] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);
        jit.move(GPRInfo::argumentGPR0, GPRInfo::returnValueGPR);
        jit.byteSwap32(GPRInfo::returnValueGPR);
        emitFunctionEpilogue(jit);
        jit.ret();
    });
    CHECK_EQ(invoke<uint64_t>(byteSwap32, 0xaabbccddee001122), static_cast<uint64_t>(0x221100ee));
    CHECK_EQ(invoke<uint64_t>(byteSwap32, 0xaabbccddee00ffaa), static_cast<uint64_t>(0xaaff00ee));

    auto byteSwap64 = compile([] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);
        jit.move(GPRInfo::argumentGPR0, GPRInfo::returnValueGPR);
        jit.byteSwap64(GPRInfo::returnValueGPR);
        emitFunctionEpilogue(jit);
        jit.ret();
    });
    CHECK_EQ(invoke<uint64_t>(byteSwap64, 0xaabbccddee001122), static_cast<uint64_t>(0x221100eeddccbbaa));
    CHECK_EQ(invoke<uint64_t>(byteSwap64, 0xaabbccddee00ffaa), static_cast<uint64_t>(0xaaff00eeddccbbaa));
#endif
}

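// For the conditional-move tests, chosenDouble is deliberately not exactly
// representable as a float, so any unintended narrowing of the selected value
// would be caught by the checks below.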
void testMoveDoubleConditionally32()
{
#if CPU(X86_64) || CPU(ARM64)
    double arg1 = 0;
    double arg2 = 0;
    const double zero = -0;

    const double chosenDouble = 6.00000059604644775390625;
    CHECK_EQ(static_cast<double>(static_cast<float>(chosenDouble)) == chosenDouble, false);

    auto sel = compile([&] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);
        jit.loadDouble(CCallHelpers::TrustedImmPtr(&zero), FPRInfo::returnValueFPR);
        jit.loadDouble(CCallHelpers::TrustedImmPtr(&arg1), FPRInfo::fpRegT1);
        jit.loadDouble(CCallHelpers::TrustedImmPtr(&arg2), FPRInfo::fpRegT2);

        jit.move(MacroAssembler::TrustedImm32(-1), GPRInfo::regT0);
        jit.moveDoubleConditionally32(MacroAssembler::Equal, GPRInfo::regT0, GPRInfo::regT0, FPRInfo::fpRegT1, FPRInfo::fpRegT2, FPRInfo::returnValueFPR);

        emitFunctionEpilogue(jit);
        jit.ret();
    });

    arg1 = chosenDouble;
    arg2 = 43;
    CHECK_EQ(invoke<double>(sel), chosenDouble);

    arg1 = 43;
    arg2 = chosenDouble;
    CHECK_EQ(invoke<double>(sel), 43.0);

#endif
}

void testMoveDoubleConditionally64()
{
#if CPU(X86_64) || CPU(ARM64)
    double arg1 = 0;
    double arg2 = 0;
    const double zero = -0;

    const double chosenDouble = 6.00000059604644775390625;
    CHECK_EQ(static_cast<double>(static_cast<float>(chosenDouble)) == chosenDouble, false);

    auto sel = compile([&] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);
        jit.loadDouble(CCallHelpers::TrustedImmPtr(&zero), FPRInfo::returnValueFPR);
        jit.loadDouble(CCallHelpers::TrustedImmPtr(&arg1), FPRInfo::fpRegT1);
        jit.loadDouble(CCallHelpers::TrustedImmPtr(&arg2), FPRInfo::fpRegT2);

        jit.move(MacroAssembler::TrustedImm64(-1), GPRInfo::regT0);
        jit.moveDoubleConditionally64(MacroAssembler::Equal, GPRInfo::regT0, GPRInfo::regT0, FPRInfo::fpRegT1, FPRInfo::fpRegT2, FPRInfo::returnValueFPR);

        emitFunctionEpilogue(jit);
        jit.ret();
    });

    arg1 = chosenDouble;
    arg2 = 43;
    CHECK_EQ(invoke<double>(sel), chosenDouble);

    arg1 = 43;
    arg2 = chosenDouble;
    CHECK_EQ(invoke<double>(sel), 43.0);

#endif
}

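// Gigacage test: caging a tagged pointer must not mask an ARM64E authentication
// failure.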
static void testCagePreservesPACFailureBit()
{
#if GIGACAGE_ENABLED
    // Placate ASan builds and any environments that disable the Gigacage.
    if (!Gigacage::shouldBeEnabled())
        return;

    RELEASE_ASSERT(!Gigacage::isDisablingPrimitiveGigacageForbidden());
    auto cage = compile([] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);
        jit.cageConditionally(Gigacage::Primitive, GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2);
        jit.move(GPRInfo::argumentGPR0, GPRInfo::returnValueGPR);
        emitFunctionEpilogue(jit);
        jit.ret();
    });

    void* ptr = Gigacage::tryMalloc(Gigacage::Primitive, 1);
    void* taggedPtr = tagArrayPtr(ptr, 1);
    RELEASE_ASSERT(hasOneBitSet(Gigacage::size(Gigacage::Primitive) << 2));
    void* notCagedPtr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + (Gigacage::size(Gigacage::Primitive) << 2));
    CHECK_NOT_EQ(Gigacage::caged(Gigacage::Primitive, notCagedPtr), notCagedPtr);
    void* taggedNotCagedPtr = tagArrayPtr(notCagedPtr, 1);

    if (isARM64E()) {
        // FIXME: This won't work if authentication failures trap but I don't know how to test for that right now.
        CHECK_NOT_EQ(invoke<void*>(cage, taggedPtr, 2), ptr);
        CHECK_EQ(invoke<void*>(cage, taggedNotCagedPtr, 1), untagArrayPtr(taggedPtr, 2));
    } else
        CHECK_EQ(invoke<void*>(cage, taggedPtr, 2), ptr);

    CHECK_EQ(invoke<void*>(cage, taggedPtr, 1), ptr);

    auto cageWithoutAuthentication = compile([] (CCallHelpers& jit) {
        emitFunctionPrologue(jit);
        jit.cageWithoutUntagging(Gigacage::Primitive, GPRInfo::argumentGPR0);
        jit.move(GPRInfo::argumentGPR0, GPRInfo::returnValueGPR);
        emitFunctionEpilogue(jit);
        jit.ret();
    });

    CHECK_EQ(invoke<void*>(cageWithoutAuthentication, taggedPtr), taggedPtr);
    if (isARM64E()) {
        // FIXME: This won't work if authentication failures trap but I don't know how to test for that right now.
        CHECK_NOT_EQ(invoke<void*>(cageWithoutAuthentication, taggedNotCagedPtr), taggedNotCagedPtr);
        CHECK_NOT_EQ(untagArrayPtr(invoke<void*>(cageWithoutAuthentication, taggedNotCagedPtr), 1), notCagedPtr);
        CHECK_NOT_EQ(invoke<void*>(cageWithoutAuthentication, taggedNotCagedPtr), taggedPtr);
        CHECK_NOT_EQ(untagArrayPtr(invoke<void*>(cageWithoutAuthentication, taggedNotCagedPtr), 1), ptr);
    }

    Gigacage::free(Gigacage::Primitive, ptr);
#endif
}

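// Registers a test as a task when its name matches the optional filter; the
// tasks are then drained by the worker threads created in run() below.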
#define RUN(test) do { \
        if (!shouldRun(#test)) \
            break; \
        numberOfTests++; \
        tasks.append( \
            createSharedTask<void()>( \
                [&] () { \
                    dataLog(#test "...\n"); \
                    test; \
                    dataLog(#test ": OK!\n"); \
                })); \
    } while (false);

void run(const char* filter)
{
    JSC::initializeThreading();
    unsigned numberOfTests = 0;

    Deque<RefPtr<SharedTask<void()>>> tasks;

    auto shouldRun = [&] (const char* testName) -> bool {
        return !filter || WTF::findIgnoringASCIICaseWithoutLength(testName, filter) != WTF::notFound;
    };

    RUN(testSimple());
    RUN(testGetEffectiveAddress(0xff00, 42, 8, CCallHelpers::TimesEight));
    RUN(testGetEffectiveAddress(0xff00, -200, -300, CCallHelpers::TimesEight));
    RUN(testBranchTruncateDoubleToInt32(0, 0));
    RUN(testBranchTruncateDoubleToInt32(42, 42));
    RUN(testBranchTruncateDoubleToInt32(42.7, 42));
    RUN(testBranchTruncateDoubleToInt32(-1234, -1234));
    RUN(testBranchTruncateDoubleToInt32(-1234.56, -1234));
    RUN(testBranchTruncateDoubleToInt32(std::numeric_limits<double>::infinity(), 0));
    RUN(testBranchTruncateDoubleToInt32(-std::numeric_limits<double>::infinity(), 0));
    RUN(testBranchTruncateDoubleToInt32(std::numeric_limits<double>::quiet_NaN(), 0));
    RUN(testBranchTruncateDoubleToInt32(std::numeric_limits<double>::signaling_NaN(), 0));
    RUN(testBranchTruncateDoubleToInt32(std::numeric_limits<double>::max(), 0));
    RUN(testBranchTruncateDoubleToInt32(-std::numeric_limits<double>::max(), 0));
    // We run this one last to make sure that checking the conversion result does
    // not rely on flags that were never reset.
    RUN(testBranchTruncateDoubleToInt32(123, 123));

    RUN(testCompareDouble(MacroAssembler::DoubleEqual));
    RUN(testCompareDouble(MacroAssembler::DoubleNotEqual));
    RUN(testCompareDouble(MacroAssembler::DoubleGreaterThan));
    RUN(testCompareDouble(MacroAssembler::DoubleGreaterThanOrEqual));
    RUN(testCompareDouble(MacroAssembler::DoubleLessThan));
    RUN(testCompareDouble(MacroAssembler::DoubleLessThanOrEqual));
    RUN(testCompareDouble(MacroAssembler::DoubleEqualOrUnordered));
    RUN(testCompareDouble(MacroAssembler::DoubleNotEqualOrUnordered));
    RUN(testCompareDouble(MacroAssembler::DoubleGreaterThanOrUnordered));
    RUN(testCompareDouble(MacroAssembler::DoubleGreaterThanOrEqualOrUnordered));
    RUN(testCompareDouble(MacroAssembler::DoubleLessThanOrUnordered));
    RUN(testCompareDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered));
    RUN(testMul32WithImmediates());

#if CPU(X86_64)
    RUN(testBranchTestBit32RegReg());
    RUN(testBranchTestBit32RegImm());
    RUN(testBranchTestBit32AddrImm());
    RUN(testBranchTestBit64RegReg());
    RUN(testBranchTestBit64RegImm());
    RUN(testBranchTestBit64AddrImm());
#endif

#if CPU(ARM64)
    RUN(testMul32SignExtend());
#endif

#if CPU(X86) || CPU(X86_64) || CPU(ARM64)
    RUN(testCompareFloat(MacroAssembler::DoubleEqual));
    RUN(testCompareFloat(MacroAssembler::DoubleNotEqual));
    RUN(testCompareFloat(MacroAssembler::DoubleGreaterThan));
    RUN(testCompareFloat(MacroAssembler::DoubleGreaterThanOrEqual));
    RUN(testCompareFloat(MacroAssembler::DoubleLessThan));
    RUN(testCompareFloat(MacroAssembler::DoubleLessThanOrEqual));
    RUN(testCompareFloat(MacroAssembler::DoubleEqualOrUnordered));
    RUN(testCompareFloat(MacroAssembler::DoubleNotEqualOrUnordered));
    RUN(testCompareFloat(MacroAssembler::DoubleGreaterThanOrUnordered));
    RUN(testCompareFloat(MacroAssembler::DoubleGreaterThanOrEqualOrUnordered));
    RUN(testCompareFloat(MacroAssembler::DoubleLessThanOrUnordered));
    RUN(testCompareFloat(MacroAssembler::DoubleLessThanOrEqualOrUnordered));
#endif

#if ENABLE(MASM_PROBE)
    RUN(testProbeReadsArgumentRegisters());
    RUN(testProbeWritesArgumentRegisters());
    RUN(testProbePreservesGPRS());
    RUN(testProbeModifiesStackPointerToInsideProbeStateOnStack());
    RUN(testProbeModifiesStackPointerToNBytesBelowSP());
    RUN(testProbeModifiesProgramCounter());
    RUN(testProbeModifiesStackValues());
#endif // ENABLE(MASM_PROBE)

    RUN(testByteSwap());
    RUN(testMoveDoubleConditionally32());
    RUN(testMoveDoubleConditionally64());

    RUN(testCagePreservesPACFailureBit());

    RUN(testOrImmMem());

    if (tasks.isEmpty())
        usage();

    Lock lock;

    Vector<Ref<Thread>> threads;
    for (unsigned i = filter ? 1 : WTF::numberOfProcessorCores(); i--;) {
        threads.append(
            Thread::create(
                "testmasm thread",
                [&] () {
                    for (;;) {
                        RefPtr<SharedTask<void()>> task;
                        {
                            LockHolder locker(lock);
                            if (tasks.isEmpty())
                                return;
                            task = tasks.takeFirst();
                        }

                        task->run();
                    }
                }));
    }

    for (auto& thread : threads)
        thread->waitForCompletion();
    crashLock.lock();
    dataLog("Completed ", numberOfTests, " tests\n");
}

} // anonymous namespace

#else // not ENABLE(JIT)

static void run(const char*)
{
    dataLog("JIT is not enabled.\n");
}

#endif // ENABLE(JIT)

int main(int argc, char** argv)
{
    const char* filter = nullptr;
    switch (argc) {
    case 1:
        break;
    case 2:
        filter = argv[1];
        break;
    default:
        usage();
        break;
    }

    run(filter);
    return 0;
}

#if OS(WINDOWS)
extern "C" __declspec(dllexport) int WINAPI dllLauncherEntryPoint(int argc, const char* argv[])
{
    return main(argc, const_cast<char**>(argv));
}
#endif