/*
 * Copyright (C) 2016-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "CPU.h"
#include "JSCJSValue.h"

namespace JSC {

// We use these memory operations when modifying memory that might be scanned by the concurrent collector.
// We don't call the default operations because they're not guaranteed to store to memory in eight-byte-aligned
// chunks. If we happened to fall into the system's normal byte-copy loop, we might see a torn JSValue in the
// concurrent collector.
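//
// Illustrative call site (dst, src, and count here are placeholders, not part
// of this header): any copy of JSValue-sized slots that the concurrent
// collector might scan should go through these helpers rather than
// memcpy/memmove/memset:
//
//     JSValue* dst = ...;
//     JSValue* src = ...;
//     gcSafeMemcpy(dst, src, count * sizeof(JSValue));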

constexpr size_t smallCutoff = 30 * 8;
constexpr size_t mediumCutoff = 4 * 1024;
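// The cutoffs above are presumably tuned empirically: at or below smallCutoff
// (30 JSValues, 240 bytes) the plain eight-byte store loop is used; between
// the two cutoffs the inline SIMD loops are used; above mediumCutoff, x86_64
// falls back to rep movsq (ARM64 always uses the SIMD loop).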

// This is a forwards loop so gcSafeMemmove can rely on the direction.
template <typename T>
ALWAYS_INLINE void gcSafeMemcpy(T* dst, T* src, size_t bytes)
{
    static_assert(sizeof(T) == sizeof(JSValue));
    RELEASE_ASSERT(bytes % 8 == 0);

#if USE(JSVALUE64)

    auto slowPathForwardMemcpy = [&] {
        size_t count = bytes / 8;
        for (unsigned i = 0; i < count; ++i)
            bitwise_cast<volatile uint64_t*>(dst)[i] = bitwise_cast<volatile uint64_t*>(src)[i];
    };

#if COMPILER(GCC_COMPATIBLE) && USE(JSVALUE64)
    if (bytes <= smallCutoff)
        slowPathForwardMemcpy();
    else if (isARM64() || bytes <= mediumCutoff) {
#if CPU(X86_64)
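        // Copies 64 bytes per iteration with four unaligned 16-byte SSE
        // loads/stores, then drains the remainder in eight-byte movq chunks so
        // the collector never observes a torn JSValue.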
        size_t alignedBytes = (bytes / 64) * 64;
        size_t tmp;
        size_t offset = 0;
        asm volatile(
            ".balign 32\t\n"
            "1:\t\n"
            "cmpq %q[offset], %q[alignedBytes]\t\n"
            "je 2f\t\n"
            "movups (%q[src], %q[offset], 1), %%xmm0\t\n"
            "movups 16(%q[src], %q[offset], 1), %%xmm1\t\n"
            "movups 32(%q[src], %q[offset], 1), %%xmm2\t\n"
            "movups 48(%q[src], %q[offset], 1), %%xmm3\t\n"
            "movups %%xmm0, (%q[dst], %q[offset], 1)\t\n"
            "movups %%xmm1, 16(%q[dst], %q[offset], 1)\t\n"
            "movups %%xmm2, 32(%q[dst], %q[offset], 1)\t\n"
            "movups %%xmm3, 48(%q[dst], %q[offset], 1)\t\n"
            "addq $64, %q[offset]\t\n"
            "jmp 1b\t\n"

            "2:\t\n"
            "cmpq %q[offset], %q[bytes]\t\n"
            "je 3f\t\n"
            "movq (%q[src], %q[offset], 1), %q[tmp]\t\n"
            "movq %q[tmp], (%q[dst], %q[offset], 1)\t\n"
            "addq $8, %q[offset]\t\n"
            "jmp 2b\t\n"

            "3:\t\n"

            : [alignedBytes] "+r" (alignedBytes), [bytes] "+r" (bytes), [tmp] "+r" (tmp), [offset] "+r" (offset), [dst] "+r" (dst), [src] "+r" (src)
            :
            : "xmm0", "xmm1", "xmm2", "xmm3", "memory", "cc"
        );
#elif CPU(ARM64)
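        // Copies 16 bytes per iteration through q0, then drains the remainder
        // in eight-byte chunks through d0 (presumably relying on the hardware
        // not tearing a 16-byte store at eight-byte granularity).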
        uint64_t alignedBytes = (static_cast<uint64_t>(bytes) / 16) * 16;
        size_t offset = 0;

        uint64_t dstPtr = static_cast<uint64_t>(bitwise_cast<uintptr_t>(dst));
        uint64_t srcPtr = static_cast<uint64_t>(bitwise_cast<uintptr_t>(src));

        asm volatile(
            "1:\t\n"
            "cmp %x[offset], %x[alignedBytes]\t\n"
            "b.eq 2f\t\n"
            "ldr q0, [%x[srcPtr], %x[offset]]\t\n"
            "str q0, [%x[dstPtr], %x[offset]]\t\n"
            "add %x[offset], %x[offset], #0x10\t\n"
            "b 1b\t\n"

            "2:\t\n"
            "cmp %x[offset], %x[bytes]\t\n"
            "b.eq 3f\t\n"
            "ldr d0, [%x[srcPtr], %x[offset]]\t\n"
            "str d0, [%x[dstPtr], %x[offset]]\t\n"
            "add %x[offset], %x[offset], #0x8\t\n"
            "b 2b\t\n"

            "3:\t\n"

            : [alignedBytes] "+r" (alignedBytes), [bytes] "+r" (bytes), [offset] "+r" (offset), [dstPtr] "+r" (dstPtr), [srcPtr] "+r" (srcPtr)
            :
            : "d0", "d1", "memory"
        );
#else
#error "Unknown architecture."
#endif // CPU(X86_64)
    } else {
        RELEASE_ASSERT(isX86_64());
#if CPU(X86_64)
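        // For copies above mediumCutoff, rep movsq moves the data in
        // eight-byte quadwords, which keeps each JSValue store whole; recent
        // x86_64 microarchitectures execute it as an optimized block copy.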
        size_t count = bytes / 8;
        asm volatile(
            ".balign 16\t\n"
            "cld\t\n"
            "rep movsq\t\n"
            : "+D" (dst), "+S" (src), "+c" (count)
            :
            : "memory");
#endif // CPU(X86_64)
    }
#else
    slowPathForwardMemcpy();
#endif // COMPILER(GCC_COMPATIBLE)
#else
    memcpy(dst, src, bytes);
#endif // USE(JSVALUE64)
}

template <typename T>
ALWAYS_INLINE void gcSafeMemmove(T* dst, T* src, size_t bytes)
{
    static_assert(sizeof(T) == sizeof(JSValue));
    RELEASE_ASSERT(bytes % 8 == 0);
#if USE(JSVALUE64)
    if (bitwise_cast<uintptr_t>(src) >= bitwise_cast<uintptr_t>(dst)) {
        // gcSafeMemcpy is written as a forwards loop, so it's safe to call
        // even when the regions overlap with src above dst.
        gcSafeMemcpy(dst, src, bytes);
        return;
    }

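    // Here src < dst; if the regions don't overlap at all, a forwards copy is
    // still safe.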
    if ((static_cast<uint64_t>(bitwise_cast<uintptr_t>(src)) + static_cast<uint64_t>(bytes)) <= static_cast<uint64_t>(bitwise_cast<uintptr_t>(dst))) {
        gcSafeMemcpy(dst, src, bytes);
        return;
    }

    auto slowPathBackwardsMemmove = [&] {
        size_t count = bytes / 8;
        for (size_t i = count; i--; )
            bitwise_cast<volatile uint64_t*>(dst)[i] = bitwise_cast<volatile uint64_t*>(src)[i];
    };

#if COMPILER(GCC_COMPATIBLE)
    if (bytes <= smallCutoff)
        slowPathBackwardsMemmove();
    else {
#if CPU(X86_64)
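        // Backwards copy: first drains the tail in eight-byte moves from the
        // end down to the last 64-byte boundary, then copies 64-byte chunks
        // backwards with unaligned SSE loads/stores.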
        size_t alignedBytes = (bytes / 64) * 64;

        size_t tail = alignedBytes;
        size_t tmp;
        asm volatile(
            "2:\t\n"
            "cmpq %q[tail], %q[bytes]\t\n"
            "je 1f\t\n"
            "addq $-8, %q[bytes]\t\n"
            "movq (%q[src], %q[bytes], 1), %q[tmp]\t\n"
            "movq %q[tmp], (%q[dst], %q[bytes], 1)\t\n"
            "jmp 2b\t\n"

            "1:\t\n"
            "test %q[alignedBytes], %q[alignedBytes]\t\n"
            "jz 3f\t\n"

            ".balign 32\t\n"
            "100:\t\n"

            "movups -64(%q[src], %q[alignedBytes], 1), %%xmm0\t\n"
            "movups -48(%q[src], %q[alignedBytes], 1), %%xmm1\t\n"
            "movups -32(%q[src], %q[alignedBytes], 1), %%xmm2\t\n"
            "movups -16(%q[src], %q[alignedBytes], 1), %%xmm3\t\n"
            "movups %%xmm0, -64(%q[dst], %q[alignedBytes], 1)\t\n"
            "movups %%xmm1, -48(%q[dst], %q[alignedBytes], 1)\t\n"
            "movups %%xmm2, -32(%q[dst], %q[alignedBytes], 1)\t\n"
            "movups %%xmm3, -16(%q[dst], %q[alignedBytes], 1)\t\n"
            "addq $-64, %q[alignedBytes]\t\n"
            "jnz 100b\t\n"

            "3:\t\n"

            : [alignedBytes] "+r" (alignedBytes), [tail] "+r" (tail), [bytes] "+r" (bytes), [tmp] "+r" (tmp), [dst] "+r" (dst), [src] "+r" (src)
            :
            : "xmm0", "xmm1", "xmm2", "xmm3", "memory", "cc"
        );
#elif CPU(ARM64)
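        // Backwards copy: drains the tail in eight-byte chunks from the end
        // down to the last 16-byte boundary, then copies 16-byte chunks
        // backwards through q0.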
        uint64_t alignedBytes = (static_cast<uint64_t>(bytes) / 16) * 16;
        uint64_t dstPtr = static_cast<uint64_t>(bitwise_cast<uintptr_t>(dst));
        uint64_t srcPtr = static_cast<uint64_t>(bitwise_cast<uintptr_t>(src));

        asm volatile(
            "1:\t\n"
            "cmp %x[alignedBytes], %x[bytes]\t\n"
            "b.eq 2f\t\n"
            "sub %x[bytes], %x[bytes], #0x8\t\n"
            "ldr d0, [%x[srcPtr], %x[bytes]]\t\n"
            "str d0, [%x[dstPtr], %x[bytes]]\t\n"
            "b 1b\t\n"

            "2:\t\n"
            "cbz %x[alignedBytes], 3f\t\n"
            "sub %x[alignedBytes], %x[alignedBytes], #0x10\t\n"
            "ldr q0, [%x[srcPtr], %x[alignedBytes]]\t\n"
            "str q0, [%x[dstPtr], %x[alignedBytes]]\t\n"
            "b 2b\t\n"

            "3:\t\n"

            : [alignedBytes] "+r" (alignedBytes), [bytes] "+r" (bytes), [dstPtr] "+r" (dstPtr), [srcPtr] "+r" (srcPtr)
            :
            : "d0", "d1", "memory"
        );
#else
#error "Unknown architecture."
#endif // CPU(X86_64)
    }
#else
    slowPathBackwardsMemmove();
#endif // COMPILER(GCC_COMPATIBLE)
#else
    memmove(dst, src, bytes);
#endif // USE(JSVALUE64)
}

template <typename T>
ALWAYS_INLINE void gcSafeZeroMemory(T* dst, size_t bytes)
{
    static_assert(sizeof(T) == sizeof(JSValue));
    RELEASE_ASSERT(bytes % 8 == 0);
#if USE(JSVALUE64)
#if COMPILER(GCC_COMPATIBLE)
#if CPU(X86_64)
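    // rep stosq fills memory with rax in eight-byte quadwords, so each JSValue
    // slot is written with a single store.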
    uint64_t zero = 0;
    size_t count = bytes / 8;
    asm volatile (
        "rep stosq\n\t"
        : "+D" (dst), "+c" (count)
        : "a" (zero)
        : "memory"
    );
#elif CPU(ARM64)
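    // Zeroes 64 bytes per iteration with pairs of non-temporal stnp stores of
    // q0 (movi to d0 also clears the upper half of q0), then drains the
    // remainder in eight-byte stores through d0.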
    uint64_t alignedBytes = (static_cast<uint64_t>(bytes) / 64) * 64;
    uint64_t dstPtr = static_cast<uint64_t>(bitwise_cast<uintptr_t>(dst));
    uint64_t end = dstPtr + bytes;
    uint64_t alignedEnd = dstPtr + alignedBytes;
    asm volatile(
        "movi d0, #0\t\n"
        "movi d1, #0\t\n"

        ".p2align 4\t\n"
        "2:\t\n"
        "cmp %x[dstPtr], %x[alignedEnd]\t\n"
        "b.eq 4f\t\n"
        "stnp q0, q0, [%x[dstPtr]]\t\n"
        "stnp q0, q0, [%x[dstPtr], #0x20]\t\n"
        "add %x[dstPtr], %x[dstPtr], #0x40\t\n"
        "b 2b\t\n"

        "4:\t\n"
        "cmp %x[dstPtr], %x[end]\t\n"
        "b.eq 5f\t\n"
        "str d0, [%x[dstPtr]], #0x8\t\n"
        "b 4b\t\n"

        "5:\t\n"

        : [alignedBytes] "+r" (alignedBytes), [bytes] "+r" (bytes), [dstPtr] "+r" (dstPtr), [end] "+r" (end), [alignedEnd] "+r" (alignedEnd)
        :
        : "d0", "d1", "memory"
    );
#else
#error "Unknown architecture."
#endif // CPU(X86_64)
#else
    size_t count = bytes / 8;
    for (size_t i = 0; i < count; ++i)
        bitwise_cast<volatile uint64_t*>(dst)[i] = 0;
#endif // COMPILER(GCC_COMPATIBLE)
#else
    memset(dst, 0, bytes);
#endif // USE(JSVALUE64)
}

} // namespace JSC