Skip to content

Commit

Permalink
Add first implementation of Wasm Audio Worklets, based on Wasm Workers.
Browse files Browse the repository at this point in the history
Fix Chrome not continuing AudioWorklet processing when the number 1 is returned from the process() callback — the callback must return the boolean value 'true' specifically.

Add new tone generator sample.

Adjust comment.

Use MessagePort.onmessage instead of add/removeEventListener(), since onmessage .start()s the MessagePort automatically.

Rename the sample from noise-generator to tone-generator.
  • Loading branch information
juj committed Nov 16, 2021
1 parent 246211d commit 4aa53f4
Show file tree
Hide file tree
Showing 13 changed files with 778 additions and 11 deletions.
20 changes: 19 additions & 1 deletion emcc.py
Original file line number Diff line number Diff line change
Expand Up @@ -1967,6 +1967,11 @@ def default_setting(name, new_default):
settings.WASM_WORKER_FILE = unsuffixed(os.path.basename(target)) + '.ww.js'
settings.JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_wasm_worker.js')))

if settings.AUDIO_WORKLET:
if settings.AUDIO_WORKLET == 1:
settings.AUDIO_WORKLET_FILE = unsuffixed(os.path.basename(target)) + '.aw.js'
settings.JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_webaudio.js')))

if settings.FORCE_FILESYSTEM and not settings.MINIMAL_RUNTIME:
# when the filesystem is forced, we export by default methods that filesystem usage
# may need, including filesystem usage from standalone file packager output (i.e.
Expand Down Expand Up @@ -2745,6 +2750,16 @@ def phase_final_emitting(options, state, target, wasm_target, memfile):
minified_worker = building.acorn_optimizer(worker_output, ['minifyWhitespace'], return_output=True)
open(worker_output, 'w').write(minified_worker)

if settings.AUDIO_WORKLET == 1:
worklet_output = os.path.join(target_dir, settings.AUDIO_WORKLET_FILE)
with open(worklet_output, 'w') as f:
f.write(shared.read_and_preprocess(shared.path_from_root('src', 'audio_worklet.js'), expand_macros=True))

# Minify the audio_worklet.js file in optimized builds
if (settings.OPT_LEVEL >= 1 or settings.SHRINK_LEVEL >= 1) and not settings.DEBUG_LEVEL:
minified_worker = building.acorn_optimizer(worklet_output, ['minifyWhitespace'], return_output=True)
open(worklet_output, 'w').write(minified_worker)

# track files that will need native eols
generated_text_files_with_native_eols = []

Expand Down Expand Up @@ -3437,7 +3452,10 @@ def module_export_name_substitution():
# For Node.js and other shell environments, create an unminified Module object so that
# loading external .asm.js file that assigns to Module['asm'] works even when Closure is used.
if settings.MINIMAL_RUNTIME and (shared.target_environment_may_be('node') or shared.target_environment_may_be('shell')):
src = 'if(typeof Module==="undefined"){var Module={};}\n' + src
if settings.AUDIO_WORKLET: # But don't clobber the Module object defined in an Audio Worklet.
src = 'if(typeof Module==="undefined"){var Module=globalThis.Module||{};}\n' + src
else:
src = 'if(typeof Module==="undefined"){var Module={};}\n' + src
write_file(final_js, src)
shared.configuration.get_temp_files().note(final_js)
save_intermediate('module_export_name_substitution')
Expand Down
159 changes: 159 additions & 0 deletions src/audio_worklet.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,159 @@
// AudioWorkletGlobalScope does not have an onmessage/postMessage() facility at the global scope, which
// means that after creating an AudioWorkletGlobalScope and loading this script into it, we cannot
// postMessage() information into it like one would do with Web Workers.

// Instead, we must create an AudioWorkletProcessor class, then instantiate a Web Audio graph node from it
// on the main thread. Using its message port and the node constructor's
// "processorOptions" field, we can share the necessary bootstrap information from the main thread to
// the AudioWorkletGlobalScope.

// Creates the concrete AudioWorkletProcessor subclass that bridges the Web
// Audio rendering thread to a user-supplied Wasm callback: all audio inputs
// and AudioParams are marshalled into Wasm stack memory, the callback is
// invoked, and produced output samples are copied back out.
// audioParams: array of AudioParamDescriptor objects, returned verbatim from
// the static parameterDescriptors getter below.
function createWasmAudioWorkletProcessor(audioParams) {
  class WasmAudioWorkletProcessor extends AudioWorkletProcessor {
    constructor(args) {
      super();

      // Copy needed stack allocation functions from the Module object
      // to global scope, these will be accessed in hot paths, so maybe
      // they'll be a bit faster to access directly, rather than referencing
      // them as properties of the Module object.
      globalThis.stackAlloc = Module['stackAlloc'];
      globalThis.stackSave = Module['stackSave'];
      globalThis.stackRestore = Module['stackRestore'];
      globalThis.HEAPU32 = Module['HEAPU32'];
      globalThis.HEAPF32 = Module['HEAPF32'];

      // Capture the Wasm function callback to invoke: opts.callback is an
      // index into the Wasm indirect function table, and opts.userData is an
      // opaque pointer passed through unchanged to every callback invocation.
      let opts = args.processorOptions;
      this.callbackFunction = Module['wasmTable'].get(opts.callback);
      this.userData = opts.userData;
    }

    // Expose the AudioParams that were specified when this processor class
    // was registered (see the bootstrap message handler).
    static get parameterDescriptors() {
      return audioParams;
    }

    // Called by the browser once per render quantum (128 sample frames).
    // Marshals all inputs and parameters to the Wasm memory on the thread
    // stack, performs the Wasm audio worklet call, and finally marshals
    // audio output data back.
    process(inputList, outputList, parameters) {
      let numInputs = inputList.length,
        numOutputs = outputList.length,
        numParams = 0, i, j, k, dataPtr,
        // Each input/output is described by an 8-byte struct: two u32 fields
        // (channel count, data pointer) — written below via HEAPU32.
        stackMemoryNeeded = (numInputs + numOutputs) * 8,
        oldStackPtr = stackSave(),
        inputsPtr, outputsPtr, outputDataPtr, paramsPtr,
        didProduceAudio, paramArray;

      // Calculate how much stack space is needed.
      // 512 bytes per channel = 128 samples/quantum * 4 bytes per f32.
      for(i of inputList) stackMemoryNeeded += i.length * 512;
      for(i of outputList) stackMemoryNeeded += i.length * 512;
      // Each AudioParam needs its 8-byte descriptor struct plus its sample
      // data (byteLength); the comma expression also tallies numParams.
      for(i in parameters) stackMemoryNeeded += parameters[i].byteLength + 8, ++numParams;

      // Allocate the necessary stack space (one contiguous region; the
      // sections below are laid out inside it in order: input structs,
      // input data, output structs, output data, param structs, param data).
      inputsPtr = stackAlloc(stackMemoryNeeded);

      // Copy input audio descriptor structs and data to Wasm
      k = inputsPtr >> 2; // u32 index of the input struct array
      dataPtr = inputsPtr + numInputs * 8; // input sample data follows the structs
      for(i of inputList) {
        // Write the AudioSampleFrame struct instance
        HEAPU32[k++] = i.length;
        HEAPU32[k++] = dataPtr;
        // Marshal the input audio sample data for each audio channel of this input
        for(j of i) {
          HEAPF32.set(j, dataPtr>>2);
          dataPtr += 512;
        }
      }

      // Copy output audio descriptor structs to Wasm
      outputsPtr = dataPtr;
      k = outputsPtr >> 2;
      // outputDataPtr is kept as an f32 index (>> 2) for the read-back loop.
      outputDataPtr = (dataPtr += numOutputs * 8) >> 2;
      for(i of outputList) {
        // Write the AudioSampleFrame struct instance
        HEAPU32[k++] = i.length;
        HEAPU32[k++] = dataPtr;
        // Reserve space for the output data
        dataPtr += 512 * i.length;
      }

      // Copy parameters descriptor structs and data to Wasm
      paramsPtr = dataPtr;
      k = paramsPtr >> 2;
      dataPtr += numParams * 8;
      // NOTE(review): numeric indexing into `parameters` assumes the
      // AudioParams are named '0', '1', ... so the loop terminates at the
      // first missing index — confirm against how params are registered.
      for(i = 0; paramArray = parameters[i++];) {
        // Write the AudioParamFrame struct instance
        HEAPU32[k++] = paramArray.length;
        HEAPU32[k++] = dataPtr;
        // Marshal the audio parameters array
        HEAPF32.set(paramArray, dataPtr>>2);
        dataPtr += paramArray.length*4;
      }

      // Call out to Wasm callback to perform audio processing
      didProduceAudio = this.callbackFunction(numInputs, inputsPtr, numOutputs, outputsPtr, numParams, paramsPtr, this.userData);

      if (didProduceAudio) {
        // Read back the produced audio data to all outputs and their channels.
        // (A garbage-free function TypedArray.copy(dstTypedArray, dstOffset, srcTypedArray, srcOffset, count) would sure be handy..
        // but web does not have one, so manually copy all bytes in)
        for(i of outputList) {
          for(j of i) {
            for(k = 0; k < 128; ++k) {
              j[k] = HEAPF32[outputDataPtr++];
            }
          }
        }
      }

      // Release all the stack memory marshalled above.
      stackRestore(oldStackPtr);

      // Return 'true' to tell the browser to continue running this processor. (Returning 1 or any other truthy value won't work in Chrome)
      return !!didProduceAudio;
    }
  }
  return WasmAudioWorkletProcessor;
}

// Specify a worklet processor that will be used to receive messages to this AudioWorkletGlobalScope.
// We never connect this initial AudioWorkletProcessor to the audio graph to do any audio processing;
// it exists only because an AudioWorkletGlobalScope has no global postMessage() channel, so a node's
// MessagePort is the only way for the main thread to talk to this scope.
class BootstrapMessages extends AudioWorkletProcessor {
  constructor(arg) {
    super();
    // Initialize the global Emscripten Module object that contains e.g. the Wasm Module and Memory objects.
    // After this we are ready to load in the main application JS script, which the main thread will addModule()
    // to this scope.
    globalThis.Module = arg['processorOptions'];
#if WEBAUDIO_DEBUG
    console.log('AudioWorklet global scope looks like this:');
    console.dir(globalThis);
#endif
    // Listen to messages from the main thread. These messages will ask this scope to create the real
    // AudioWorkletProcessors that call out to Wasm to do audio processing.
    // Assigning to .onmessage (rather than addEventListener()) implicitly
    // .start()s the MessagePort, so no explicit start() call is needed.
    let p = this.port;
    p.onmessage = (msg) => {
      let d = msg.data;
      // Register a real AudioWorkletProcessor that will actually do audio processing.
      registerProcessor(d['name'], createWasmAudioWorkletProcessor(d['audioParams']));
#if WEBAUDIO_DEBUG
      console.log(`Registered a new WasmAudioWorkletProcessor "${d['name']}" with AudioParams: ${d['audioParams']}`);
#endif
      // Post a message back telling that we have now registered the AudioWorkletProcessor class.
      // This message does not need to contain any information - just need to let the main thread know that
      // the processor can now be instantiated.
      p.postMessage(0);
    }
  }

  // No-op, not doing audio processing in this processor. It is just for receiving bootstrap messages.
  // (Returns undefined, i.e. falsy — this processor does not keep itself alive via its return value.)
  process() {
#if ASSERTIONS
    console.error('This function should not be getting called ever!');
#endif
  }
};

// Register the dummy processor that will just receive messages.
// "message" is the processor name the main thread instantiates to bootstrap this scope.
registerProcessor("message", BootstrapMessages);
22 changes: 12 additions & 10 deletions src/library_wasm_worker.js
Original file line number Diff line number Diff line change
Expand Up @@ -43,16 +43,18 @@ mergeInto(LibraryManager.library, {
___set_stack_limits(_emscripten_stack_get_base(), _emscripten_stack_get_end());
#endif

// The Wasm Worker runtime is now up, so we can start processing
// any postMessage function calls that have been received. Drop the temp
// message handler that appended incoming postMessage function calls to a queue ...
removeEventListener('message', __wasm_worker_appendToQueue);
// ... then flush whatever messages we may have gotten in the queue ...
__wasm_worker_delayedMessageQueue.forEach(__wasm_worker_runPostMessage);
__wasm_worker_delayedMessageQueue = null;
// ... and finally register the proper postMessage handler that immediately
// dispatches incoming function calls without queueing them.
addEventListener('message', __wasm_worker_runPostMessage);
if (!ENVIRONMENT_IS_AUDIO_WORKLET) {
// The Wasm Worker runtime is now up, so we can start processing
// any postMessage function calls that have been received. Drop the temp
// message handler that appended incoming postMessage function calls to a queue ...
removeEventListener('message', __wasm_worker_appendToQueue);
// ... then flush whatever messages we may have gotten in the queue ...
__wasm_worker_delayedMessageQueue.forEach(__wasm_worker_runPostMessage);
__wasm_worker_delayedMessageQueue = null;
// ... and finally register the proper postMessage handler that immediately
// dispatches incoming function calls without queueing them.
addEventListener('message', __wasm_worker_runPostMessage);
}
},

#if WASM_WORKERS == 2
Expand Down
Loading

0 comments on commit 4aa53f4

Please sign in to comment.