/*
 * Copyright (C) 2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "WasmOperations.h"

#if ENABLE(WEBASSEMBLY)

#include "FrameTracers.h"
#include "IteratorOperations.h"
#include "JITExceptions.h"
#include "JSCJSValueInlines.h"
#include "JSGlobalObjectInlines.h"
#include "JSWebAssemblyHelpers.h"
#include "JSWebAssemblyInstance.h"
#include "JSWebAssemblyRuntimeError.h"
#include "ProbeContext.h"
#include "WasmCallee.h"
#include "WasmCallingConvention.h"
#include "WasmContextInlines.h"
#include "WasmInstance.h"
#include "WasmMemory.h"
#include "WasmNameSection.h"
#include "WasmOMGForOSREntryPlan.h"
#include "WasmOMGPlan.h"
#include "WasmOSREntryData.h"
#include "WasmSignatureInlines.h"
#include "WasmWorklist.h"
#include <wtf/DataLog.h>
#include <wtf/Locker.h>
#include <wtf/MonotonicTime.h>
#include <wtf/StdLibExtras.h>

IGNORE_WARNINGS_BEGIN("frame-address")

namespace JSC { namespace Wasm {

void JIT_OPERATION operationWasmThrowBadI64(JSWebAssemblyInstance* instance)
{
    VM& vm = instance->vm();
    CallFrame* callFrame = DECLARE_CALL_FRAME(vm);
    JITOperationPrologueCallFrameTracer tracer(vm, callFrame);

    {
        auto throwScope = DECLARE_THROW_SCOPE(vm);
        JSGlobalObject* globalObject = instance->globalObject();
        auto* error = ErrorInstance::create(globalObject, vm, globalObject->errorStructure(ErrorType::TypeError), "i64 not allowed as return type or argument to an imported function"_s);
        throwException(globalObject, throwScope, error);
    }

    genericUnwind(vm, callFrame);
    ASSERT(!!vm.callFrameForCatch);
}

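// Tier-up helpers shared by the trigger operations below. shouldTriggerOMGCompile() returns false only
// while no OMG replacement exists and the function's optimization threshold has not yet been crossed.
// triggerOMGReplacementCompile() then either re-arms the counter (a replacement already exists) or
// enqueues an OMGPlan on the worklist, using TierUpCount::m_compilationStatusForOMG to avoid enqueuing
// duplicate plans.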
static bool shouldTriggerOMGCompile(TierUpCount& tierUp, OMGCallee* replacement, uint32_t functionIndex)
{
    if (!replacement && !tierUp.checkIfOptimizationThresholdReached()) {
        dataLogLnIf(Options::verboseOSR(), "delayOMGCompile counter = ", tierUp, " for ", functionIndex);
        dataLogLnIf(Options::verboseOSR(), "Choosing not to OMG-optimize ", functionIndex, " yet.");
        return false;
    }
    return true;
}

static void triggerOMGReplacementCompile(TierUpCount& tierUp, OMGCallee* replacement, Instance* instance, Wasm::CodeBlock& codeBlock, uint32_t functionIndex)
{
    if (replacement) {
        tierUp.optimizeSoon(functionIndex);
        return;
    }

    bool compile = false;
    {
        auto locker = holdLock(tierUp.getLock());
        switch (tierUp.m_compilationStatusForOMG) {
        case TierUpCount::CompilationStatus::StartCompilation:
            tierUp.setOptimizationThresholdBasedOnCompilationResult(functionIndex, CompilationDeferred);
            return;
        case TierUpCount::CompilationStatus::NotCompiled:
            compile = true;
            tierUp.m_compilationStatusForOMG = TierUpCount::CompilationStatus::StartCompilation;
            break;
        default:
            break;
        }
    }

    if (compile) {
        dataLogLnIf(Options::verboseOSR(), "triggerOMGReplacement for ", functionIndex);
        // We need to compile the code.
        Ref<Plan> plan = adoptRef(*new OMGPlan(instance->context(), Ref<Wasm::Module>(instance->module()), functionIndex, codeBlock.mode(), Plan::dontFinalize()));
        ensureWorklist().enqueue(plan.copyRef());
        if (UNLIKELY(!Options::useConcurrentJIT()))
            plan->waitForCompletion();
        else
            tierUp.setOptimizationThresholdBasedOnCompilationResult(functionIndex, CompilationDeferred);
    }
}

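// doOSREntry() transfers control from a BBQ frame into an OMG-for-OSR-entry callee while running under a
// probe. It copies every live value described by the OSREntryData into a scratch buffer, restores the BBQ
// callee-save registers, simulates the function epilogue (like a tail call), and finally parks the buffer
// and the entry point in argumentGPR0/argumentGPR1 so the probe's caller jumps into the OSR entry code.
// Writing 0 to argumentGPR0 instead signals "no OSR entry".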
SUPPRESS_ASAN
static void doOSREntry(Instance* instance, Probe::Context& context, BBQCallee& callee, OMGForOSREntryCallee& osrEntryCallee, OSREntryData& osrEntryData)
{
    auto returnWithoutOSREntry = [&] {
        context.gpr(GPRInfo::argumentGPR0) = 0;
    };

    RELEASE_ASSERT(osrEntryCallee.osrEntryScratchBufferSize() == osrEntryData.values().size());

    uint64_t* buffer = instance->context()->scratchBufferForSize(osrEntryCallee.osrEntryScratchBufferSize());
    if (!buffer)
        return returnWithoutOSREntry();

    dataLogLnIf(Options::verboseOSR(), osrEntryData.functionIndex(), ":OMG OSR entry: got entry callee ", RawPointer(&osrEntryCallee));

    // 1. Place required values in scratch buffer.
    for (unsigned index = 0; index < osrEntryData.values().size(); ++index) {
        const OSREntryValue& value = osrEntryData.values()[index];
        dataLogLnIf(Options::verboseOSR(), "OMG OSR entry values[", index, "] ", value.type(), " ", value);
        if (value.isGPR()) {
            switch (value.type().kind()) {
            case B3::Float:
            case B3::Double:
                RELEASE_ASSERT_NOT_REACHED();
            default:
                *bitwise_cast<uint64_t*>(buffer + index) = context.gpr(value.gpr());
            }
        } else if (value.isFPR()) {
            switch (value.type().kind()) {
            case B3::Float:
            case B3::Double:
                *bitwise_cast<double*>(buffer + index) = context.fpr(value.fpr());
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        } else if (value.isConstant()) {
            switch (value.type().kind()) {
            case B3::Float:
                *bitwise_cast<float*>(buffer + index) = value.floatValue();
                break;
            case B3::Double:
                *bitwise_cast<double*>(buffer + index) = value.doubleValue();
                break;
            default:
                *bitwise_cast<uint64_t*>(buffer + index) = value.value();
            }
        } else if (value.isStack()) {
            switch (value.type().kind()) {
            case B3::Float:
                *bitwise_cast<float*>(buffer + index) = *bitwise_cast<float*>(bitwise_cast<uint8_t*>(context.fp()) + value.offsetFromFP());
                break;
            case B3::Double:
                *bitwise_cast<double*>(buffer + index) = *bitwise_cast<double*>(bitwise_cast<uint8_t*>(context.fp()) + value.offsetFromFP());
                break;
            default:
                *bitwise_cast<uint64_t*>(buffer + index) = *bitwise_cast<uint64_t*>(bitwise_cast<uint8_t*>(context.fp()) + value.offsetFromFP());
                break;
            }
        } else if (value.isStackArgument()) {
            switch (value.type().kind()) {
            case B3::Float:
                *bitwise_cast<float*>(buffer + index) = *bitwise_cast<float*>(bitwise_cast<uint8_t*>(context.sp()) + value.offsetFromSP());
                break;
            case B3::Double:
                *bitwise_cast<double*>(buffer + index) = *bitwise_cast<double*>(bitwise_cast<uint8_t*>(context.sp()) + value.offsetFromSP());
                break;
            default:
                *bitwise_cast<uint64_t*>(buffer + index) = *bitwise_cast<uint64_t*>(bitwise_cast<uint8_t*>(context.sp()) + value.offsetFromSP());
                break;
            }
        } else
            RELEASE_ASSERT_NOT_REACHED();
    }

    // 2. Restore callee saves.
    RegisterSet dontRestoreRegisters = RegisterSet::stackRegisters();
    for (const RegisterAtOffset& entry : *callee.calleeSaveRegisters()) {
        if (dontRestoreRegisters.get(entry.reg()))
            continue;
        if (entry.reg().isGPR())
            context.gpr(entry.reg().gpr()) = *bitwise_cast<UCPURegister*>(bitwise_cast<uint8_t*>(context.fp()) + entry.offset());
        else
            context.fpr(entry.reg().fpr()) = *bitwise_cast<double*>(bitwise_cast<uint8_t*>(context.fp()) + entry.offset());
    }

    // 3. Function epilogue, like a tail-call.
    UCPURegister* framePointer = bitwise_cast<UCPURegister*>(context.fp());
#if CPU(X86_64)
    // move(framePointerRegister, stackPointerRegister);
    // pop(framePointerRegister);
    context.fp() = bitwise_cast<UCPURegister*>(*framePointer);
    context.sp() = framePointer + 1;
    static_assert(AssemblyHelpers::prologueStackPointerDelta() == sizeof(void*) * 1);
#elif CPU(ARM64E) || CPU(ARM64)
    // move(framePointerRegister, stackPointerRegister);
    // popPair(framePointerRegister, linkRegister);
    context.fp() = bitwise_cast<UCPURegister*>(*framePointer);
    context.gpr(ARM64Registers::lr) = bitwise_cast<UCPURegister>(*(framePointer + 1));
    context.sp() = framePointer + 2;
    static_assert(AssemblyHelpers::prologueStackPointerDelta() == sizeof(void*) * 2);
#if CPU(ARM64E)
    // LR needs to be untagged since the OSR entry function's prologue will tag it with SP. This is similar to a tail call.
    context.gpr(ARM64Registers::lr) = bitwise_cast<UCPURegister>(untagCodePtr(context.gpr<void*>(ARM64Registers::lr), bitwise_cast<PtrTag>(context.sp())));
#endif
#else
#error Unsupported architecture.
#endif
    // 4. Configure argument registers to jump to OSR entry from the caller of this runtime function.
    context.gpr(GPRInfo::argumentGPR0) = bitwise_cast<UCPURegister>(buffer);
    context.gpr(GPRInfo::argumentGPR1) = bitwise_cast<UCPURegister>(osrEntryCallee.entrypoint().executableAddress<>());
}

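// Called from a BBQ loop back edge (via a probe) when the tier-up counter fires. Depending on the
// TierUpCount state this either OSR-enters an already-compiled OMGForOSREntryCallee for this loop,
// kicks off an OMGPlan or OMGForOSREntryPlan compilation, asks an outer loop to compile instead, or
// defers by resetting the counter and returning without OSR entry.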
void JIT_OPERATION operationWasmTriggerOSREntryNow(Probe::Context& context)
{
    OSREntryData& osrEntryData = *context.arg<OSREntryData*>();
    uint32_t functionIndex = osrEntryData.functionIndex();
    uint32_t loopIndex = osrEntryData.loopIndex();
    Instance* instance = Wasm::Context::tryLoadInstanceFromTLS();
    if (!instance)
        instance = context.gpr<Instance*>(Wasm::PinnedRegisterInfo::get().wasmContextInstancePointer);

    auto returnWithoutOSREntry = [&] {
        context.gpr(GPRInfo::argumentGPR0) = 0;
    };

    Wasm::CodeBlock& codeBlock = *instance->codeBlock();
    ASSERT(instance->memory()->mode() == codeBlock.mode());

    uint32_t functionIndexInSpace = functionIndex + codeBlock.functionImportCount();
    ASSERT(codeBlock.wasmBBQCalleeFromFunctionIndexSpace(functionIndexInSpace).compilationMode() == Wasm::CompilationMode::BBQMode);
    BBQCallee& callee = static_cast<BBQCallee&>(codeBlock.wasmBBQCalleeFromFunctionIndexSpace(functionIndexInSpace));
    TierUpCount& tierUp = *callee.tierUpCount();
    dataLogLnIf(Options::verboseOSR(), "Consider OMGForOSREntryPlan for [", functionIndex, "] loopIndex#", loopIndex, " with executeCounter = ", tierUp, " ", RawPointer(callee.replacement()));

    if (!Options::useWebAssemblyOSR()) {
        if (shouldTriggerOMGCompile(tierUp, callee.replacement(), functionIndex))
            triggerOMGReplacementCompile(tierUp, callee.replacement(), instance, codeBlock, functionIndex);

        // We already have an OMG replacement.
        if (callee.replacement()) {
            // No OSR entry points. Just defer indefinitely.
            if (tierUp.osrEntryTriggers().isEmpty()) {
                tierUp.dontOptimizeAnytimeSoon(functionIndex);
                return;
            }

            // Found one OSR entry point. Since we do not have a way to jettison a Wasm::Callee right now, the tierUp function is now meaningless.
            // Avoid calling it as much as possible.
            if (callee.osrEntryCallee()) {
                tierUp.dontOptimizeAnytimeSoon(functionIndex);
                return;
            }
        }
        return returnWithoutOSREntry();
    }

    TierUpCount::CompilationStatus compilationStatus = TierUpCount::CompilationStatus::NotCompiled;
    {
        auto locker = holdLock(tierUp.getLock());
        compilationStatus = tierUp.m_compilationStatusForOMGForOSREntry;
    }

    bool triggeredSlowPathToStartCompilation = false;
    switch (tierUp.osrEntryTriggers()[loopIndex]) {
    case TierUpCount::TriggerReason::DontTrigger:
        // The trigger isn't set, we entered because the counter reached its
        // threshold.
        break;
    case TierUpCount::TriggerReason::CompilationDone:
        // The trigger was set because compilation completed. Don't unset it
        // so that further BBQ executions OSR enter as well.
        break;
    case TierUpCount::TriggerReason::StartCompilation: {
        // We were asked to enter as soon as possible and start compiling an
        // entry for the current loopIndex. Unset this trigger so we
        // don't continually enter.
        auto locker = holdLock(tierUp.getLock());
        TierUpCount::TriggerReason reason = tierUp.osrEntryTriggers()[loopIndex];
        if (reason == TierUpCount::TriggerReason::StartCompilation) {
            tierUp.osrEntryTriggers()[loopIndex] = TierUpCount::TriggerReason::DontTrigger;
            triggeredSlowPathToStartCompilation = true;
        }
        break;
    }
    }

    if (compilationStatus == TierUpCount::CompilationStatus::StartCompilation) {
        dataLogLnIf(Options::verboseOSR(), "delayOMGCompile still compiling for ", functionIndex);
        tierUp.setOptimizationThresholdBasedOnCompilationResult(functionIndex, CompilationDeferred);
        return returnWithoutOSREntry();
    }

    if (OMGForOSREntryCallee* osrEntryCallee = callee.osrEntryCallee()) {
        if (osrEntryCallee->loopIndex() == loopIndex)
            return doOSREntry(instance, context, callee, *osrEntryCallee, osrEntryData);
    }

    if (!shouldTriggerOMGCompile(tierUp, callee.replacement(), functionIndex) && !triggeredSlowPathToStartCompilation)
        return returnWithoutOSREntry();

    if (!triggeredSlowPathToStartCompilation) {
        triggerOMGReplacementCompile(tierUp, callee.replacement(), instance, codeBlock, functionIndex);

        if (!callee.replacement())
            return returnWithoutOSREntry();
    }

    if (OMGForOSREntryCallee* osrEntryCallee = callee.osrEntryCallee()) {
        if (osrEntryCallee->loopIndex() == loopIndex)
            return doOSREntry(instance, context, callee, *osrEntryCallee, osrEntryData);
        tierUp.dontOptimizeAnytimeSoon(functionIndex);
        return returnWithoutOSREntry();
    }

    // Instead of triggering OSR entry compilation in the inner loop, make the outer loop's trigger immediately effective (by setting TriggerReason::StartCompilation) and
    // let the outer loop attempt to compile.
    if (!triggeredSlowPathToStartCompilation) {
        // An inner loop didn't specifically ask for us to kick off a compilation. This means the counter
        // crossed its threshold. We either fall through and kick off a compile for originBytecodeIndex,
        // or we flag an outer loop to immediately try to compile itself. If there are outer loops,
        // we first try to make them compile themselves. But we will eventually fall back to compiling
        // a progressively inner loop if it takes too long for control to reach an outer loop.

        auto tryTriggerOuterLoopToCompile = [&] {
            // We start with the outermost loop and make our way inwards (hence we iterate the vector in reverse).
            // Our policy is that we will trigger an outer loop to compile immediately when program control reaches it.
            // If program control is taking too long to reach that outer loop, we progressively move inwards, meaning,
            // we'll eventually trigger some loop that is executing to compile. We start with trying to compile outer
            // loops since we believe outer loop compilations reveal the best opportunities for optimizing code.
            uint32_t currentLoopIndex = tierUp.outerLoops()[loopIndex];
            auto locker = holdLock(tierUp.getLock());

            // We already started OMGForOSREntryPlan.
            if (callee.didStartCompilingOSREntryCallee())
                return false;

            while (currentLoopIndex != UINT32_MAX) {
                if (tierUp.osrEntryTriggers()[currentLoopIndex] == TierUpCount::TriggerReason::StartCompilation) {
                    // This means that we already asked this loop to compile. If we've reached here, it
                    // means program control has not yet reached that loop. So it's taking too long to compile.
                    // So we move on to asking the inner loop of this loop to compile itself.
                    currentLoopIndex = tierUp.outerLoops()[currentLoopIndex];
                    continue;
                }

                // This is where we ask the outer loop to immediately compile itself if program
                // control reaches it.
                dataLogLnIf(Options::verboseOSR(), "Inner-loop loopIndex#", loopIndex, " in ", functionIndex, " setting parent loop loopIndex#", currentLoopIndex, "'s trigger and backing off.");
                tierUp.osrEntryTriggers()[currentLoopIndex] = TierUpCount::TriggerReason::StartCompilation;
                return true;
            }
            return false;
        };

        if (tryTriggerOuterLoopToCompile()) {
            tierUp.setOptimizationThresholdBasedOnCompilationResult(functionIndex, CompilationDeferred);
            return returnWithoutOSREntry();
        }
    }

    bool startOSREntryCompilation = false;
    {
        auto locker = holdLock(tierUp.getLock());
        if (tierUp.m_compilationStatusForOMGForOSREntry == TierUpCount::CompilationStatus::NotCompiled) {
            tierUp.m_compilationStatusForOMGForOSREntry = TierUpCount::CompilationStatus::StartCompilation;
            startOSREntryCompilation = true;
            // Currently, we do not have a way to jettison wasm code. This means that once we decide to compile OSR entry code for a particular loopIndex,
            // we cannot throw away the compiled code so long as the Wasm module is live. We immediately disable all the triggers.
            for (auto& trigger : tierUp.osrEntryTriggers())
                trigger = TierUpCount::TriggerReason::DontTrigger;
        }
    }

    if (startOSREntryCompilation) {
        dataLogLnIf(Options::verboseOSR(), "triggerOMGOSR for ", functionIndex);
        Ref<Plan> plan = adoptRef(*new OMGForOSREntryPlan(instance->context(), Ref<Wasm::Module>(instance->module()), Ref<Wasm::BBQCallee>(callee), functionIndex, loopIndex, codeBlock.mode(), Plan::dontFinalize()));
        ensureWorklist().enqueue(plan.copyRef());
        if (UNLIKELY(!Options::useConcurrentJIT()))
            plan->waitForCompletion();
        else
            tierUp.setOptimizationThresholdBasedOnCompilationResult(functionIndex, CompilationDeferred);
    }

    OMGForOSREntryCallee* osrEntryCallee = callee.osrEntryCallee();
    if (!osrEntryCallee) {
        tierUp.setOptimizationThresholdBasedOnCompilationResult(functionIndex, CompilationDeferred);
        return returnWithoutOSREntry();
    }

    if (osrEntryCallee->loopIndex() == loopIndex)
        return doOSREntry(instance, context, callee, *osrEntryCallee, osrEntryData);

    tierUp.dontOptimizeAnytimeSoon(functionIndex);
    return returnWithoutOSREntry();
}

void JIT_OPERATION operationWasmTriggerTierUpNow(Instance* instance, uint32_t functionIndex)
{
    Wasm::CodeBlock& codeBlock = *instance->codeBlock();
    ASSERT(instance->memory()->mode() == codeBlock.mode());

    uint32_t functionIndexInSpace = functionIndex + codeBlock.functionImportCount();
    ASSERT(codeBlock.wasmBBQCalleeFromFunctionIndexSpace(functionIndexInSpace).compilationMode() == Wasm::CompilationMode::BBQMode);
    BBQCallee& callee = static_cast<BBQCallee&>(codeBlock.wasmBBQCalleeFromFunctionIndexSpace(functionIndexInSpace));
    TierUpCount& tierUp = *callee.tierUpCount();
    dataLogLnIf(Options::verboseOSR(), "Consider OMGPlan for [", functionIndex, "] with executeCounter = ", tierUp, " ", RawPointer(callee.replacement()));

    if (shouldTriggerOMGCompile(tierUp, callee.replacement(), functionIndex))
        triggerOMGReplacementCompile(tierUp, callee.replacement(), instance, codeBlock, functionIndex);

    // We already have an OMG replacement.
    if (callee.replacement()) {
        // No OSR entry points. Just defer indefinitely.
        if (tierUp.osrEntryTriggers().isEmpty()) {
            dataLogLnIf(Options::verboseOSR(), "delayOMGCompile replacement in place, delaying indefinitely for ", functionIndex);
            tierUp.dontOptimizeAnytimeSoon(functionIndex);
            return;
        }

        // Found one OSR entry point. Since we do not have a way to jettison a Wasm::Callee right now, the tierUp function is now meaningless.
        // Avoid calling it as much as possible.
        if (callee.osrEntryCallee()) {
            dataLogLnIf(Options::verboseOSR(), "delayOMGCompile trigger in place, delaying indefinitely for ", functionIndex);
            tierUp.dontOptimizeAnytimeSoon(functionIndex);
            return;
        }
    }
}

void JIT_OPERATION operationWasmUnwind(CallFrame* callFrame)
{
    // FIXME: Consider passing JSWebAssemblyInstance* instead.
    // https://bugs.webkit.org/show_bug.cgi?id=203206
    VM& vm = callFrame->deprecatedVM();
    NativeCallFrameTracer tracer(vm, callFrame);
    genericUnwind(vm, callFrame);
    ASSERT(!!vm.callFrameForCatch);
}

double JIT_OPERATION operationConvertToF64(CallFrame* callFrame, JSValue v)
{
    // FIXME: Consider passing JSWebAssemblyInstance* instead.
    // https://bugs.webkit.org/show_bug.cgi?id=203206
    VM& vm = callFrame->deprecatedVM();
    NativeCallFrameTracer tracer(vm, callFrame);
    return v.toNumber(callFrame->lexicalGlobalObject(vm));
}

int32_t JIT_OPERATION operationConvertToI32(CallFrame* callFrame, JSValue v)
{
    // FIXME: Consider passing JSWebAssemblyInstance* instead.
    // https://bugs.webkit.org/show_bug.cgi?id=203206
    VM& vm = callFrame->deprecatedVM();
    NativeCallFrameTracer tracer(vm, callFrame);
    return v.toInt32(callFrame->lexicalGlobalObject(vm));
}

float JIT_OPERATION operationConvertToF32(CallFrame* callFrame, JSValue v)
{
    // FIXME: Consider passing JSWebAssemblyInstance* instead.
    // https://bugs.webkit.org/show_bug.cgi?id=203206
    VM& vm = callFrame->deprecatedVM();
    NativeCallFrameTracer tracer(vm, callFrame);
    return static_cast<float>(v.toNumber(callFrame->lexicalGlobalObject(vm)));
}

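// Used when a JS import returns multiple values to Wasm: iterates the returned JS iterable, unboxes each
// element according to the signature's return types, and stores it either into the saved result registers
// or onto the callee frame as dictated by the Wasm calling convention. Throws a TypeError if the iterable
// does not produce exactly signature->returnCount() values.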
void JIT_OPERATION operationIterateResults(CallFrame* callFrame, Instance* instance, const Signature* signature, JSValue result, uint64_t* registerResults, uint64_t* calleeFramePointer)
{
    // FIXME: Consider passing JSWebAssemblyInstance* instead.
    // https://bugs.webkit.org/show_bug.cgi?id=203206
    JSWebAssemblyInstance* jsInstance = instance->owner<JSWebAssemblyInstance>();
    JSGlobalObject* globalObject = jsInstance->globalObject();
    VM& vm = globalObject->vm();
    NativeCallFrameTracer tracer(vm, callFrame);
    auto scope = DECLARE_THROW_SCOPE(vm);

    auto wasmCallInfo = wasmCallingConvention().callInformationFor(*signature, CallRole::Callee);
    RegisterAtOffsetList registerResultOffsets = wasmCallInfo.computeResultsOffsetList();

    unsigned itemsInserted = 0;
    forEachInIterable(globalObject, result, [&] (VM& vm, JSGlobalObject* globalObject, JSValue value) -> void {
        auto scope = DECLARE_THROW_SCOPE(vm);
        if (itemsInserted < signature->returnCount()) {
            uint64_t unboxedValue;
            switch (signature->returnType(itemsInserted)) {
            case I32:
                unboxedValue = value.toInt32(globalObject);
                break;
            case F32:
                unboxedValue = bitwise_cast<uint32_t>(value.toFloat(globalObject));
                break;
            case F64:
                unboxedValue = bitwise_cast<uint64_t>(value.toNumber(globalObject));
                break;
            case Funcref:
                if (!value.isFunction(vm)) {
                    throwTypeError(globalObject, scope, "Funcref value is not a function"_s);
                    return;
                }
                FALLTHROUGH;
            case Anyref:
                unboxedValue = bitwise_cast<uint64_t>(value);
                RELEASE_ASSERT(Options::useWebAssemblyReferences());
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }

            RETURN_IF_EXCEPTION(scope, void());
            auto rep = wasmCallInfo.results[itemsInserted];
            if (rep.isReg())
                registerResults[registerResultOffsets.find(rep.reg())->offset() / sizeof(uint64_t)] = unboxedValue;
            else
                calleeFramePointer[rep.offsetFromFP() / sizeof(uint64_t)] = unboxedValue;
        }
        itemsInserted++;
    });
    RETURN_IF_EXCEPTION(scope, void());
    if (itemsInserted != signature->returnCount())
        throwVMTypeError(globalObject, scope, "Incorrect number of values returned to Wasm from JS");
}

// FIXME: It would be much easier to inline this when we have a global GC, which could probably mean we could avoid
// spilling the results onto the stack.
// Saved result registers should be placed on the stack just above the last stack result.
JSArray* JIT_OPERATION operationAllocateResultsArray(CallFrame* callFrame, Wasm::Instance* instance, const Signature* signature, IndexingType indexingType, JSValue* stackPointerFromCallee)
{
    JSWebAssemblyInstance* jsInstance = instance->owner<JSWebAssemblyInstance>();
    VM& vm = jsInstance->vm();
    NativeCallFrameTracer tracer(vm, callFrame);

    JSGlobalObject* globalObject = jsInstance->globalObject();
    ObjectInitializationScope initializationScope(globalObject->vm());
    JSArray* result = JSArray::tryCreateUninitializedRestricted(initializationScope, nullptr, globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType), signature->returnCount());

    // FIXME: Handle allocation failure...
    RELEASE_ASSERT(result);

    auto wasmCallInfo = wasmCallingConvention().callInformationFor(*signature);
    RegisterAtOffsetList registerResults = wasmCallInfo.computeResultsOffsetList();

    static_assert(sizeof(JSValue) == sizeof(CPURegister), "The code below relies on this.");
    for (unsigned i = 0; i < signature->returnCount(); ++i) {
        B3::ValueRep rep = wasmCallInfo.results[i];
        JSValue value;
        if (rep.isReg())
            value = stackPointerFromCallee[(registerResults.find(rep.reg())->offset() + wasmCallInfo.headerAndArgumentStackSizeInBytes) / sizeof(JSValue)];
        else
            value = stackPointerFromCallee[rep.offsetFromSP() / sizeof(JSValue)];
        result->initializeIndex(initializationScope, i, value);
    }

    ASSERT(result->indexingType() == indexingType);
    return result;
}

void JIT_OPERATION operationWasmWriteBarrierSlowPath(JSWebAssemblyInstance* cell, VM* vmPointer)
{
    ASSERT(cell);
    ASSERT(vmPointer);
    VM& vm = *vmPointer;
    vm.heap.writeBarrierSlowPath(cell);
}

uint32_t JIT_OPERATION operationPopcount32(int32_t value)
{
    return __builtin_popcount(value);
}

uint64_t JIT_OPERATION operationPopcount64(int64_t value)
{
    return __builtin_popcountll(value);
}

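// Runtime slow path for Wasm's memory.grow: attempts to grow the instance's memory by `delta` pages and
// returns the previous size in pages on success, or -1 on failure (negative delta, invalid size, exceeding
// the declared maximum, or out of memory), matching the instruction's -1-on-failure semantics.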
int32_t JIT_OPERATION operationGrowMemory(void* callFrame, Instance* instance, int32_t delta)
{
    instance->storeTopCallFrame(callFrame);

    if (delta < 0)
        return -1;

    auto grown = instance->memory()->grow(PageCount(delta));
    if (!grown) {
        switch (grown.error()) {
        case Memory::GrowFailReason::InvalidDelta:
        case Memory::GrowFailReason::InvalidGrowSize:
        case Memory::GrowFailReason::WouldExceedMaximum:
        case Memory::GrowFailReason::OutOfMemory:
            return -1;
        }
        RELEASE_ASSERT_NOT_REACHED();
    }

    return grown.value().pageCount();
}

EncodedJSValue JIT_OPERATION operationGetWasmTableElement(Instance* instance, unsigned tableIndex, int32_t signedIndex)
{
    ASSERT(tableIndex < instance->module().moduleInformation().tableCount());
    if (signedIndex < 0)
        return 0;

    uint32_t index = signedIndex;
    if (index >= instance->table(tableIndex)->length())
        return 0;

    return JSValue::encode(instance->table(tableIndex)->get(index));
}

static bool setWasmTableElement(Instance* instance, unsigned tableIndex, int32_t signedIndex, EncodedJSValue encValue)
{
    ASSERT(tableIndex < instance->module().moduleInformation().tableCount());
    if (signedIndex < 0)
        return false;

    uint32_t index = signedIndex;
    if (index >= instance->table(tableIndex)->length())
        return false;

    JSValue value = JSValue::decode(encValue);
    if (instance->table(tableIndex)->type() == Wasm::TableElementType::Anyref)
        instance->table(tableIndex)->set(index, value);
    else if (instance->table(tableIndex)->type() == Wasm::TableElementType::Funcref) {
        WebAssemblyFunction* wasmFunction;
        WebAssemblyWrapperFunction* wasmWrapperFunction;

        if (isWebAssemblyHostFunction(instance->owner<JSObject>()->vm(), value, wasmFunction, wasmWrapperFunction)) {
            ASSERT(!!wasmFunction || !!wasmWrapperFunction);
            if (wasmFunction)
                instance->table(tableIndex)->asFuncrefTable()->setFunction(index, jsCast<JSObject*>(value), wasmFunction->importableFunction(), &wasmFunction->instance()->instance());
            else
                instance->table(tableIndex)->asFuncrefTable()->setFunction(index, jsCast<JSObject*>(value), wasmWrapperFunction->importableFunction(), &wasmWrapperFunction->instance()->instance());
        } else if (value.isNull())
            instance->table(tableIndex)->clear(index);
        else
            ASSERT_NOT_REACHED();
    } else
        ASSERT_NOT_REACHED();

    return true;
}

bool JIT_OPERATION operationSetWasmTableElement(Instance* instance, unsigned tableIndex, int32_t signedIndex, EncodedJSValue encValue)
{
    return setWasmTableElement(instance, tableIndex, signedIndex, encValue);
}

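// Runtime slow path for table.grow: grows the table by `delta` entries and fills the new entries with
// `fill`. Returns the previous length on success (a negative delta is treated as a no-op), or -1 if the
// grow fails.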
int32_t JIT_OPERATION operationWasmTableGrow(Instance* instance, unsigned tableIndex, EncodedJSValue fill, int32_t delta)
{
    ASSERT(tableIndex < instance->module().moduleInformation().tableCount());
    auto oldSize = instance->table(tableIndex)->length();
    if (delta < 0)
        return oldSize;
    auto newSize = instance->table(tableIndex)->grow(delta);
    if (!newSize || *newSize == oldSize)
        return -1;

    for (unsigned i = oldSize; i < instance->table(tableIndex)->length(); ++i)
        setWasmTableElement(instance, tableIndex, i, fill);

    return oldSize;
}

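// Runtime slow path for table.fill: bounds-checks [offset, offset + count) against the table's current
// length and, if in range, sets each of the count slots to `fill` via setWasmTableElement(). Returns
// false for a negative or out-of-bounds range.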
bool JIT_OPERATION operationWasmTableFill(Instance* instance, unsigned tableIndex, int32_t unsafeOffset, EncodedJSValue fill, int32_t unsafeCount)
{
    ASSERT(tableIndex < instance->module().moduleInformation().tableCount());
    if (unsafeOffset < 0 || unsafeCount < 0)
        return false;

    unsigned offset = unsafeOffset;
    unsigned count = unsafeCount;

    if (offset >= instance->table(tableIndex)->length() || offset + count > instance->table(tableIndex)->length())
        return false;

    for (unsigned j = 0; j < count; ++j)
        setWasmTableElement(instance, tableIndex, offset + j, fill);

    return true;
}

EncodedJSValue JIT_OPERATION operationWasmRefFunc(Instance* instance, uint32_t index)
{
    JSValue value = instance->getFunctionWrapper(index);
    ASSERT(value.isFunction(instance->owner<JSObject>()->vm()));
    return JSValue::encode(value);
}

int32_t JIT_OPERATION operationGetWasmTableSize(Instance* instance, unsigned tableIndex)
{
    return instance->table(tableIndex)->length();
}

void* JIT_OPERATION operationWasmToJSException(CallFrame* callFrame, Wasm::ExceptionType type, Instance* wasmInstance)
{
    wasmInstance->storeTopCallFrame(callFrame);
    JSWebAssemblyInstance* instance = wasmInstance->owner<JSWebAssemblyInstance>();
    JSGlobalObject* globalObject = instance->globalObject();

    // Do not retrieve VM& from CallFrame since CallFrame's callee is not a JSCell.
    VM& vm = globalObject->vm();

    {
        auto throwScope = DECLARE_THROW_SCOPE(vm);

        JSObject* error;
        if (type == ExceptionType::StackOverflow)
            error = createStackOverflowError(globalObject);
        else
            error = JSWebAssemblyRuntimeError::create(globalObject, vm, globalObject->webAssemblyRuntimeErrorStructure(), Wasm::errorMessageForExceptionType(type));
        throwException(globalObject, throwScope, error);
    }

    genericUnwind(vm, callFrame);
    ASSERT(!!vm.callFrameForCatch);
    ASSERT(!!vm.targetMachinePCForThrow);
    // FIXME: We could make this better:
    // This is a total hack, but the llint (both op_catch and handleUncaughtException)
    // require a cell in the callee field to load the VM. (The baseline JIT does not require
    // this since it is compiled with a constant VM pointer.) We could make the calling convention
    // for exceptions first load callFrameForCatch into the call frame register before jumping
    // to the exception handler. If we did this, we could remove this terrible hack.
    // https://bugs.webkit.org/show_bug.cgi?id=170440
    bitwise_cast<uint64_t*>(callFrame)[CallFrameSlot::callee] = bitwise_cast<uint64_t>(instance->webAssemblyToJSCallee());
    return vm.targetMachinePCForThrow;
}

} } // namespace JSC::Wasm

IGNORE_WARNINGS_END

#endif // ENABLE(WEBASSEMBLY)