From 4aa53f43af938d5d55f4ce54b0612387cbcffd21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jukka=20Jyl=C3=A4nki?= Date: Sun, 24 Oct 2021 18:47:25 +0300 Subject: [PATCH] Add first implementation of Wasm Audio Worklets, based on Wasm Workers. Fix Chrome not continuing the AudioWorklet processing if a number 1 is returned from the callback - must return 'true' specifically. Add new tone generator sample. Adjust comment. Use MessagePort.onmessage instead of add/removeEventListener(), since onmessage .start()s the MessagePort automatically. Fix name noise-generator to tone-generator --- emcc.py | 20 ++- src/audio_worklet.js | 159 +++++++++++++++++++++ src/library_wasm_worker.js | 22 +-- src/library_webaudio.js | 203 +++++++++++++++++++++++++++ src/postamble_minimal.js | 10 ++ src/preamble_minimal.js | 5 + src/settings.js | 8 ++ src/settings_internal.js | 3 + src/shell_minimal.js | 4 + system/include/emscripten/webaudio.h | 109 ++++++++++++++ tests/webaudio/audioworklet.c | 90 ++++++++++++ tests/webaudio/create_webaudio.c | 31 ++++ tests/webaudio/tone_generator.c | 125 +++++++++++++++++ 13 files changed, 778 insertions(+), 11 deletions(-) create mode 100644 src/audio_worklet.js create mode 100644 src/library_webaudio.js create mode 100644 system/include/emscripten/webaudio.h create mode 100644 tests/webaudio/audioworklet.c create mode 100644 tests/webaudio/create_webaudio.c create mode 100644 tests/webaudio/tone_generator.c diff --git a/emcc.py b/emcc.py index fa59907832f3e..03f75e13827d3 100755 --- a/emcc.py +++ b/emcc.py @@ -1967,6 +1967,11 @@ def default_setting(name, new_default): settings.WASM_WORKER_FILE = unsuffixed(os.path.basename(target)) + '.ww.js' settings.JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_wasm_worker.js'))) + if settings.AUDIO_WORKLET: + if settings.AUDIO_WORKLET == 1: + settings.AUDIO_WORKLET_FILE = unsuffixed(os.path.basename(target)) + '.aw.js' + settings.JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_webaudio.js'))) + if settings.FORCE_FILESYSTEM and not settings.MINIMAL_RUNTIME: # when the filesystem is forced, we export by default methods that filesystem usage # may need, including filesystem usage from standalone file packager output (i.e. @@ -2745,6 +2750,16 @@ def phase_final_emitting(options, state, target, wasm_target, memfile): minified_worker = building.acorn_optimizer(worker_output, ['minifyWhitespace'], return_output=True) open(worker_output, 'w').write(minified_worker) + if settings.AUDIO_WORKLET == 1: + worklet_output = os.path.join(target_dir, settings.AUDIO_WORKLET_FILE) + with open(worklet_output, 'w') as f: + f.write(shared.read_and_preprocess(shared.path_from_root('src', 'audio_worklet.js'), expand_macros=True)) + + # Minify the audio_worklet.js file in optimized builds + if (settings.OPT_LEVEL >= 1 or settings.SHRINK_LEVEL >= 1) and not settings.DEBUG_LEVEL: + minified_worker = building.acorn_optimizer(worklet_output, ['minifyWhitespace'], return_output=True) + open(worklet_output, 'w').write(minified_worker) + # track files that will need native eols generated_text_files_with_native_eols = [] @@ -3437,7 +3452,10 @@ def module_export_name_substitution(): # For Node.js and other shell environments, create an unminified Module object so that # loading external .asm.js file that assigns to Module['asm'] works even when Closure is used. 
  if settings.MINIMAL_RUNTIME and (shared.target_environment_may_be('node') or shared.target_environment_may_be('shell')):
-    src = 'if(typeof Module==="undefined"){var Module={};}\n' + src
+    if settings.AUDIO_WORKLET: # But don't clobber the Module object defined in an Audio Worklet.
+      src = 'if(typeof Module==="undefined"){var Module=globalThis.Module||{};}\n' + src
+    else:
+      src = 'if(typeof Module==="undefined"){var Module={};}\n' + src
   write_file(final_js, src)
   shared.configuration.get_temp_files().note(final_js)
   save_intermediate('module_export_name_substitution')
diff --git a/src/audio_worklet.js b/src/audio_worklet.js
new file mode 100644
index 0000000000000..d9234abec3643
--- /dev/null
+++ b/src/audio_worklet.js
@@ -0,0 +1,159 @@
+// AudioWorkletGlobalScope does not provide onmessage/postMessage() functionality at the global scope, which
+// means that after creating an AudioWorkletGlobalScope and loading this script into it, we cannot
+// postMessage() information into it like one would do with Web Workers.
+
+// Instead, we must create an AudioWorkletProcessor class, then instantiate a Web Audio graph node from it
+// on the main thread. Using its message port and the node constructor's
+// "processorOptions" field, we can share the necessary bootstrap information from the main thread to
+// the AudioWorkletGlobalScope.
+
+function createWasmAudioWorkletProcessor(audioParams) {
+  class WasmAudioWorkletProcessor extends AudioWorkletProcessor {
+    constructor(args) {
+      super();
+
+      // Copy the needed stack allocation functions from the Module object
+      // to the global scope; these are accessed in hot paths, so they may
+      // be a bit faster to access directly, rather than referencing
+      // them as properties of the Module object.
+      globalThis.stackAlloc = Module['stackAlloc'];
+      globalThis.stackSave = Module['stackSave'];
+      globalThis.stackRestore = Module['stackRestore'];
+      globalThis.HEAPU32 = Module['HEAPU32'];
+      globalThis.HEAPF32 = Module['HEAPF32'];
+
+      // Capture the Wasm function callback to invoke.
+      let opts = args.processorOptions;
+      this.callbackFunction = Module['wasmTable'].get(opts.callback);
+      this.userData = opts.userData;
+    }
+
+    static get parameterDescriptors() {
+      return audioParams;
+    }
+
+    process(inputList, outputList, parameters) {
+      // Marshal all inputs and parameters to the Wasm memory on the thread stack,
+      // then perform the Wasm audio worklet call,
+      // and finally marshal audio output data back.
+
+      let numInputs = inputList.length,
+        numOutputs = outputList.length,
+        numParams = 0, i, j, k, dataPtr,
+        stackMemoryNeeded = (numInputs + numOutputs) * 8,
+        oldStackPtr = stackSave(),
+        inputsPtr, outputsPtr, outputDataPtr, paramsPtr,
+        didProduceAudio, paramArray;
+
+      // Calculate how much stack space is needed.
+      for(i of inputList) stackMemoryNeeded += i.length * 512;
+      for(i of outputList) stackMemoryNeeded += i.length * 512;
+      for(i in parameters) stackMemoryNeeded += parameters[i].byteLength + 8, ++numParams;
+
+      // Allocate the necessary stack space.
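+      // (Sizes used above: each audio channel carries 128 samples of 32-bit float data,
+      // i.e. 512 bytes, and each AudioSampleFrame/AudioParamFrame descriptor struct takes
+      // 8 bytes: a u32 count followed by a u32 pointer to the sample data.)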
+      inputsPtr = stackAlloc(stackMemoryNeeded);
+
+      // Copy input audio descriptor structs and data to Wasm
+      k = inputsPtr >> 2;
+      dataPtr = inputsPtr + numInputs * 8;
+      for(i of inputList) {
+        // Write the AudioSampleFrame struct instance
+        HEAPU32[k++] = i.length;
+        HEAPU32[k++] = dataPtr;
+        // Marshal the input audio sample data for each audio channel of this input
+        for(j of i) {
+          HEAPF32.set(j, dataPtr>>2);
+          dataPtr += 512;
+        }
+      }
+
+      // Copy output audio descriptor structs to Wasm
+      outputsPtr = dataPtr;
+      k = outputsPtr >> 2;
+      outputDataPtr = (dataPtr += numOutputs * 8) >> 2;
+      for(i of outputList) {
+        // Write the AudioSampleFrame struct instance
+        HEAPU32[k++] = i.length;
+        HEAPU32[k++] = dataPtr;
+        // Reserve space for the output data
+        dataPtr += 512 * i.length;
+      }
+
+      // Copy parameter descriptor structs and data to Wasm
+      paramsPtr = dataPtr;
+      k = paramsPtr >> 2;
+      dataPtr += numParams * 8;
+      for(i = 0; paramArray = parameters[i++];) {
+        // Write the AudioParamFrame struct instance
+        HEAPU32[k++] = paramArray.length;
+        HEAPU32[k++] = dataPtr;
+        // Marshal the audio parameters array
+        HEAPF32.set(paramArray, dataPtr>>2);
+        dataPtr += paramArray.length*4;
+      }
+
+      // Call out to Wasm callback to perform audio processing
+      didProduceAudio = this.callbackFunction(numInputs, inputsPtr, numOutputs, outputsPtr, numParams, paramsPtr, this.userData);
+
+      if (didProduceAudio) {
+        // Read back the produced audio data to all outputs and their channels.
+        // (A garbage-free function TypedArray.copy(dstTypedArray, dstOffset, srcTypedArray, srcOffset, count) would sure be handy...
+        // but the web does not have one, so manually copy all bytes in)
+        for(i of outputList) {
+          for(j of i) {
+            for(k = 0; k < 128; ++k) {
+              j[k] = HEAPF32[outputDataPtr++];
+            }
+          }
+        }
+      }
+
+      stackRestore(oldStackPtr);
+
+      // Return 'true' to tell the browser to continue running this processor. (Returning 1 or any other truthy value won't work in Chrome.)
+      return !!didProduceAudio;
+    }
+  }
+  return WasmAudioWorkletProcessor;
+}
+
+// Specify a worklet processor that will be used to receive messages sent to this AudioWorkletGlobalScope.
+// We never connect this initial AudioWorkletProcessor to the audio graph to do any audio processing.
+class BootstrapMessages extends AudioWorkletProcessor {
+  constructor(arg) {
+    super();
+    // Initialize the global Emscripten Module object that contains e.g. the Wasm Module and Memory objects.
+    // After this we are ready to load in the main application JS script, which the main thread will addModule()
+    // to this scope.
+    globalThis.Module = arg['processorOptions'];
+#if WEBAUDIO_DEBUG
+    console.log('AudioWorklet global scope looks like this:');
+    console.dir(globalThis);
+#endif
+    // Listen to messages from the main thread. These messages will ask this scope to create the real
+    // AudioWorkletProcessors that call out to Wasm to do audio processing.
+    let p = this.port;
+    p.onmessage = (msg) => {
+      let d = msg.data;
+      // Register a real AudioWorkletProcessor that will actually do audio processing.
+      registerProcessor(d['name'], createWasmAudioWorkletProcessor(d['audioParams']));
+#if WEBAUDIO_DEBUG
+      console.log(`Registered a new WasmAudioWorkletProcessor "${d['name']}" with AudioParams: ${d['audioParams']}`);
+#endif
+      // Post a message back telling that we have now registered the AudioWorkletProcessor class.
+      // This message does not need to contain any information - it just needs to let the main thread know that
+      // the processor can now be instantiated.
+ p.postMessage(0); + } + } + + // No-op, not doing audio processing in this processor. It is just for receiving bootstrap messages. + process() { +#if ASSERTIONS + console.error('This function should not be getting called ever!'); +#endif + } +}; + +// Register the dummy processor that will just receive messages. +registerProcessor("message", BootstrapMessages); diff --git a/src/library_wasm_worker.js b/src/library_wasm_worker.js index 7162251af06e8..58a91dc12f61f 100644 --- a/src/library_wasm_worker.js +++ b/src/library_wasm_worker.js @@ -43,16 +43,18 @@ mergeInto(LibraryManager.library, { ___set_stack_limits(_emscripten_stack_get_base(), _emscripten_stack_get_end()); #endif - // The Wasm Worker runtime is now up, so we can start processing - // any postMessage function calls that have been received. Drop the temp - // message handler that appended incoming postMessage function calls to a queue ... - removeEventListener('message', __wasm_worker_appendToQueue); - // ... then flush whatever messages we may have gotten in the queue ... - __wasm_worker_delayedMessageQueue.forEach(__wasm_worker_runPostMessage); - __wasm_worker_delayedMessageQueue = null; - // ... and finally register the proper postMessage handler that immediately - // dispatches incoming function calls without queueing them. - addEventListener('message', __wasm_worker_runPostMessage); + if (!ENVIRONMENT_IS_AUDIO_WORKLET) { + // The Wasm Worker runtime is now up, so we can start processing + // any postMessage function calls that have been received. Drop the temp + // message handler that appended incoming postMessage function calls to a queue ... + removeEventListener('message', __wasm_worker_appendToQueue); + // ... then flush whatever messages we may have gotten in the queue ... + __wasm_worker_delayedMessageQueue.forEach(__wasm_worker_runPostMessage); + __wasm_worker_delayedMessageQueue = null; + // ... and finally register the proper postMessage handler that immediately + // dispatches incoming function calls without queueing them. + addEventListener('message', __wasm_worker_runPostMessage); + } }, #if WASM_WORKERS == 2 diff --git a/src/library_webaudio.js b/src/library_webaudio.js new file mode 100644 index 0000000000000..a40de77b589c6 --- /dev/null +++ b/src/library_webaudio.js @@ -0,0 +1,203 @@ +let LibraryWebAudio = { + $EmAudio: {}, + $EmAudioCounter: 0, + + // Call this function from JavaScript to register a Wasm-side handle to an AudioContext that + // you have already created manually without calling emscripten_create_audio_context(). + // Note: To let that AudioContext be garbage collected later, call the function + // emscriptenDestroyAudioContext() to unbind it from Wasm. + $emscriptenRegisterAudioObject__deps: ['$EmAudio', '$EmAudioCounter'], + $emscriptenRegisterAudioObject: function(object) { + EmAudio[++EmAudioCounter] = object; +#if WEBAUDIO_DEBUG + console.log(`Registered new WebAudio object ${object} with ID ${EmAudioCounter}`); +#endif + return EmAudioCounter; + }, + + // Call this function from JavaScript to destroy a Wasm-side handle to an AudioContext. + // After calling this function, it is no longer possible to reference this AudioContext + // from Wasm code - and the GC can reclaim it after all references to it are cleared. + $emscriptenDestroyAudioContext: 'emscripten_destroy_audio_context', + + // Call this function from JavaScript to get the Web Audio object corresponding to the given + // Wasm handle ID. 
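+  // For example (a usage sketch, not part of the patch itself): from an EM_ASM block or other
+  // JS code one might write
+  //   let h = emscriptenRegisterAudioObject(new AudioContext());
+  //   emscriptenGetAudioObject(h).suspend(); // operates on that same AudioContext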
+  $emscriptenGetAudioObject: function(objectHandle) {
+    return EmAudio[objectHandle];
+  },
+
+  emscripten_destroy_audio_context__sig: 'vi',
+  emscripten_destroy_audio_context: function(contextHandle) {
+#if WEBAUDIO_DEBUG
+    console.log(`Destroyed WebAudio context with ID ${contextHandle}`);
+#endif
+    delete EmAudio[contextHandle];
+  },
+
+  emscripten_destroy_wasm_audio_worklet_node: function(objectHandle) {
+#if WEBAUDIO_DEBUG
+    console.log(`Destroyed Wasm AudioWorkletNode with ID ${objectHandle}`);
+#endif
+    // Explicitly disconnect the node from the Web Audio graph before letting it GC,
+    // to work around browser bugs such as https://bugs.webkit.org/show_bug.cgi?id=222098#c23
+    EmAudio[objectHandle].disconnect();
+    delete EmAudio[objectHandle];
+  },
+
+  emscripten_create_audio_context__deps: ['$emscriptenRegisterAudioObject'],
+  emscripten_create_audio_context: function(options) {
+    let ctx = window.AudioContext || window.webkitAudioContext;
+#if ASSERTIONS
+    if (!ctx) console.error('emscripten_create_audio_context failed! Web Audio is not supported.');
+#endif
+    options >>= 2;
+
+    let opts = options ? {
+      latencyHint: HEAPU32[options] ? UTF8ToString(HEAPU32[options]) : void 0,
+      sampleRate: HEAP32[options+1] || void 0
+    } : void 0;
+
+#if WEBAUDIO_DEBUG
+    console.log(`Creating new WebAudio context with parameters:`);
+    console.dir(opts);
+#endif
+
+    return ctx && emscriptenRegisterAudioObject(new ctx(opts));
+  },
+
+  emscripten_resume_audio_context_async: function(contextHandle, callback, userData) {
+    function cb(state) {
+#if WEBAUDIO_DEBUG
+      console.log(`emscripten_resume_audio_context_async() callback: New audio state="${EmAudio[contextHandle].state}", enum state=${state}`);
+#endif
+      {{{ makeDynCall('viii', 'callback') }}}(contextHandle, state, userData);
+    }
+#if WEBAUDIO_DEBUG
+    console.log(`emscripten_resume_audio_context_async() resuming...`);
+#endif
+    EmAudio[contextHandle].resume().then(() => { cb(1/*running*/) }).catch(() => { cb(0/*suspended*/) });
+  },
+
+  emscripten_start_wasm_audio_worklet_thread_async__deps: [
+#if WASM_WORKERS
+    'wasm_workers_id',
+#endif
+    '$_EmAudioDispatchProcessorCallback'],
+  emscripten_start_wasm_audio_worklet_thread_async: function(contextHandle, stackLowestAddress, stackSize, callback, userData) {
+#if !AUDIO_WORKLET
+    abort('emscripten_start_wasm_audio_worklet_thread_async() requires building with -s AUDIO_WORKLET=1 enabled!');
+#endif
+
+    let audioContext = EmAudio[contextHandle],
+      audioWorklet = audioContext.audioWorklet;
+
+#if ASSERTIONS
+    assert(stackLowestAddress != 0, 'AudioWorklets require a dedicated stack space for audio data marshalling between Wasm and JS!');
+    assert(stackSize != 0, 'AudioWorklets require a dedicated stack space for audio data marshalling between Wasm and JS!');
+    assert(!audioContext.audioWorkletInitialized, 'emscripten_start_wasm_audio_worklet_thread_async() was already called for AudioContext ' + contextHandle + '! Only call this function once per AudioContext!');
+    audioContext.audioWorkletInitialized = 1;
+#endif
+
+#if WEBAUDIO_DEBUG
+    console.log(`emscripten_start_wasm_audio_worklet_thread_async() adding the .aw.js bootstrap module...`);
+#endif
+
+    // TODO: In MINIMAL_RUNTIME builds, read this file off of a preloaded Blob, and/or embed from a string like with WASM_WORKERS==2 mode.
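+    // (The '.aw.js' module loaded below is the preprocessed copy of src/audio_worklet.js that
+    // emcc.py writes next to the main output JS; it defines the BootstrapMessages processor
+    // that bootstraps this AudioWorkletGlobalScope.)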
+    audioWorklet.addModule('{{{ TARGET_BASENAME }}}.aw.js').then(() => {
+#if WEBAUDIO_DEBUG
+      console.log(`emscripten_start_wasm_audio_worklet_thread_async() addModule() of the .aw.js bootstrap module completed`);
+#endif
+      audioWorklet.bootstrapMessage = new AudioWorkletNode(audioContext, 'message', {
+        processorOptions: {
+#if WASM_WORKERS
+          '$ww': _wasm_workers_id++, // Assign the loaded AudioWorkletGlobalScope a Wasm Worker ID so that it can utilize its own TLS slots, and so that it is recognized as not being the main browser thread.
+#endif
+          'wasm': Module['wasm'],
+          'mem': wasmMemory,
+          'sb': stackLowestAddress,
+          'sz': stackSize,
+        }
+      });
+      audioWorklet.bootstrapMessage.port.onmessage = _EmAudioDispatchProcessorCallback;
+
+      // AudioWorklets do not have an importScripts() function like Web Workers do (and AudioWorkletGlobalScope does not allow dynamic import() either),
+      // but instead, the main thread must load all JS code into the worklet scope. Send the application main JS script to the audio worklet.
+      return audioWorklet.addModule(Module['js']);
+    }).then(() => {
+#if WEBAUDIO_DEBUG
+      console.log(`emscripten_start_wasm_audio_worklet_thread_async() addModule() of the main application JS completed`);
+#endif
+      {{{ makeDynCall('vii', 'callback') }}}(contextHandle, 1/*EM_TRUE*/, userData);
+    }).catch(() => {
+#if WEBAUDIO_DEBUG
+      console.error(`emscripten_start_wasm_audio_worklet_thread_async() addModule() failed!`);
+#endif
+      {{{ makeDynCall('vii', 'callback') }}}(contextHandle, 0/*EM_FALSE*/, userData);
+    });
+  },
+
+  // Store a queue of async callbacks we need to fire when AudioWorkletProcessors have completed
+  // loading.
+  $_EmAudioProcessorCreateRequests: [],
+
+  $_EmAudioDispatchProcessorCallback__deps: ['$_EmAudioProcessorCreateRequests'],
+  $_EmAudioDispatchProcessorCallback: function() {
+    let e = _EmAudioProcessorCreateRequests.shift();
+    {{{ makeDynCall('viii', 'e[1]') }}}(e[0], 1/*EM_TRUE*/, e[2]);
+  },
+
+  emscripten_create_wasm_audio_worklet_processor_async__deps: ['$_EmAudioProcessorCreateRequests'],
+  emscripten_create_wasm_audio_worklet_processor_async: function(contextHandle, options, callback, userData) {
+    _EmAudioProcessorCreateRequests.push([contextHandle, callback, userData]);
+
+    options >>= 2;
+    let audioParams = [],
+      numAudioParams = HEAPU32[options+1],
+      audioParamDescriptors = HEAPU32[options+2] >> 2,
+      i = 0;
+
+    while(numAudioParams--) {
+      audioParams.push({
+        name: i++,
+        defaultValue: HEAPF32[audioParamDescriptors++],
+        minValue: HEAPF32[audioParamDescriptors++],
+        maxValue: HEAPF32[audioParamDescriptors++],
+        automationRate: ['a','k'][HEAPU32[audioParamDescriptors++]] + '-rate',
+      });
+    }
+
+#if WEBAUDIO_DEBUG
+    console.log(`emscripten_create_wasm_audio_worklet_processor_async() creating a new AudioWorklet processor with name ${UTF8ToString(HEAPU32[options])}`);
+#endif
+
+    EmAudio[contextHandle].audioWorklet.bootstrapMessage.port.postMessage({
+      name: UTF8ToString(HEAPU32[options]),
+      audioParams: audioParams
+    });
+  },
+
+  emscripten_create_wasm_audio_worklet_node: function(audioContext, name, options, callback, userData) {
+    options >>= 2;
+
+    function readChannelCountArray(heapIndex, numOutputs) {
+      let channelCounts = [];
+      while(numOutputs--) channelCounts.push(HEAPU32[heapIndex++]);
+      return channelCounts;
+    }
+
+    let opts = options ? {
+      numberOfInputs: HEAP32[options],
+      numberOfOutputs: HEAP32[options+1],
+      outputChannelCount: HEAPU32[options+2] ?
readChannelCountArray(HEAPU32[options+2]>>2, HEAP32[options+1]) : void 0, + processorOptions: { callback: callback, userData: userData } + } : void 0; + +#if WEBAUDIO_DEBUG + console.log(`Creating AudioWorkletNode "${UTF8ToString(name)}" on context=${audioContext} with options:`); + console.dir(opts); +#endif + return emscriptenRegisterAudioObject(new AudioWorkletNode(EmAudio[audioContext], UTF8ToString(name), opts)); + } +}; + +mergeInto(LibraryManager.library, LibraryWebAudio); diff --git a/src/postamble_minimal.js b/src/postamble_minimal.js index b14fe57d15253..7d244ac290633 100644 --- a/src/postamble_minimal.js +++ b/src/postamble_minimal.js @@ -215,6 +215,16 @@ WebAssembly.instantiate(Module['wasm'], imports).then(function(output) { assert(wasmTable); #endif +#if AUDIO_WORKLET + // If we are in the audio worklet environment, we can only access the Module object + // and not the global scope of the main JS script. Therefore we need to export + // all functions that the audio worklet scope needs onto the Module object. + Module['wasmTable'] = wasmTable; + Module['stackSave'] = stackSave; + Module['stackAlloc'] = stackAlloc; + Module['stackRestore'] = stackRestore; +#endif + #if !IMPORTED_MEMORY wasmMemory = asm['memory']; #if ASSERTIONS diff --git a/src/preamble_minimal.js b/src/preamble_minimal.js index 8491463323df4..2e5ab9c1d2753 100644 --- a/src/preamble_minimal.js +++ b/src/preamble_minimal.js @@ -81,8 +81,13 @@ function updateGlobalBufferAndViews(b) { HEAP32 = new Int32Array(b); HEAPU8 = new Uint8Array(b); HEAPU16 = new Uint16Array(b); +#if AUDIO_WORKLET + Module['HEAPU32'] = HEAPU32 = new Uint32Array(b); + Module['HEAPF32'] = HEAPF32 = new Float32Array(b); +#else HEAPU32 = new Uint32Array(b); HEAPF32 = new Float32Array(b); +#endif HEAPF64 = new Float64Array(b); } diff --git a/src/settings.js b/src/settings.js index fc0acdb0f31de..b11298345110e 100644 --- a/src/settings.js +++ b/src/settings.js @@ -1461,6 +1461,14 @@ var USE_PTHREADS = 0; // [compile+link] - affects user code at compile and system libraries at link. var WASM_WORKERS = 0; +// If true, enables targeting Wasm Web Audio AudioWorklets. +// [link] +var AUDIO_WORKLET = 0; + +// If true, enables deep debugging of Web Audio backend. +// [link] +var WEBAUDIO_DEBUG = 0; + // In web browsers, Workers cannot be created while the main browser thread // is executing JS/Wasm code, but the main thread must regularly yield back // to the browser event loop for Worker initialization to occur. diff --git a/src/settings_internal.js b/src/settings_internal.js index 7b171163bf177..a5291ac206fb3 100644 --- a/src/settings_internal.js +++ b/src/settings_internal.js @@ -129,6 +129,9 @@ var PTHREAD_WORKER_FILE = ''; // name of the file containing the Wasm Worker *.ww.js, if relevant var WASM_WORKER_FILE = ''; +// name of the file containing the Audio Worklet *.aw.js, if relevant +var AUDIO_WORKLET_FILE = ''; + // If 1, we are building with SharedArrayBuffer as Wasm Memory. 
var SHARED_MEMORY = 0;
diff --git a/src/shell_minimal.js b/src/shell_minimal.js
index 377b370356cbd..6b7bfe0da1abb 100644
--- a/src/shell_minimal.js
+++ b/src/shell_minimal.js
@@ -50,6 +50,10 @@ var ENVIRONMENT_IS_WEB = !ENVIRONMENT_IS_NODE;
 var ENVIRONMENT_IS_WASM_WORKER = Module['$ww'];
 #endif
 
+#if AUDIO_WORKLET
+var ENVIRONMENT_IS_AUDIO_WORKLET = typeof AudioWorkletGlobalScope !== 'undefined';
+#endif
+
 #if ASSERTIONS && ENVIRONMENT_MAY_BE_NODE && ENVIRONMENT_MAY_BE_SHELL
 if (ENVIRONMENT_IS_NODE && ENVIRONMENT_IS_SHELL) {
   throw 'unclear environment';
diff --git a/system/include/emscripten/webaudio.h b/system/include/emscripten/webaudio.h
new file mode 100644
index 0000000000000..5080ed96a958a
--- /dev/null
+++ b/system/include/emscripten/webaudio.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2021 The Emscripten Authors. All rights reserved.
+ * Emscripten is available under two separate licenses, the MIT license and the
+ * University of Illinois/NCSA Open Source License. Both these licenses can be
+ * found in the LICENSE file.
+ */
+
+#pragma once
+
+#include <stdint.h>
+#include <stddef.h>
+
+#include <emscripten/emscripten.h>
+#include <emscripten/html5.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int EMSCRIPTEN_WEBAUDIO_T;
+
+typedef struct EmscriptenWebAudioCreateAttributes
+{
+  const char *latencyHint; // Specify one of "balanced", "interactive" or "playback"
+  uint32_t sampleRate; // E.g. 44100 or 48000
+} EmscriptenWebAudioCreateAttributes;
+
+// Creates a new Web Audio AudioContext, and returns a handle to it.
+EMSCRIPTEN_WEBAUDIO_T emscripten_create_audio_context(const EmscriptenWebAudioCreateAttributes *options);
+
+typedef int AUDIO_CONTEXT_STATE;
+#define AUDIO_CONTEXT_STATE_SUSPENDED 0
+#define AUDIO_CONTEXT_STATE_RUNNING   1
+#define AUDIO_CONTEXT_STATE_CLOSED    2
+
+typedef void (*EmscriptenResumeAudioContextCallback)(EMSCRIPTEN_WEBAUDIO_T audioContext, AUDIO_CONTEXT_STATE state, void *userData);
+
+// Resumes the given AudioContext. The specified callback will fire when the AudioContext has completed resuming. Call this function
+// inside a user event handler (mousedown, button click, etc.)
+void emscripten_resume_audio_context_async(EMSCRIPTEN_WEBAUDIO_T audioContext, EmscriptenResumeAudioContextCallback callback, void *userData);
+
+typedef void (*EmscriptenStartWebAudioWorkletCallback)(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void *userData);
+
+// Creates a Wasm AudioWorklet thread. Call this function once at application startup to establish an AudioWorkletGlobalScope for your app.
+// After the scope has been initialized, the given callback will fire.
+void emscripten_start_wasm_audio_worklet_thread_async(EMSCRIPTEN_WEBAUDIO_T audioContext, void *stackLowestAddress, uint32_t stackSize, EmscriptenStartWebAudioWorkletCallback callback, void *userData);
+
+typedef int WEBAUDIO_PARAM_AUTOMATION_RATE;
+#define WEBAUDIO_PARAM_A_RATE 0
+#define WEBAUDIO_PARAM_K_RATE 1
+
+typedef struct WebAudioParamDescriptor
+{
+  float defaultValue; // Default = 0.0
+  float minValue; // Default = -3.4028235e38
+  float maxValue; // Default = 3.4028235e38
+  WEBAUDIO_PARAM_AUTOMATION_RATE automationRate; // Either WEBAUDIO_PARAM_A_RATE or WEBAUDIO_PARAM_K_RATE. Default = WEBAUDIO_PARAM_A_RATE
+} WebAudioParamDescriptor;
+
+typedef struct WebAudioWorkletProcessorCreateOptions
+{
+  const char *name; // The name of the AudioWorkletProcessor that is being created.
+
+  int numAudioParams;
+  const WebAudioParamDescriptor *audioParamDescriptors;
+} WebAudioWorkletProcessorCreateOptions;
+
+typedef void (*EmscriptenWorkletProcessorCreatedCallback)(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void *userData);
+
+// Creates a new AudioWorkletProcessor with the given name and specified set of control parameters.
+void emscripten_create_wasm_audio_worklet_processor_async(EMSCRIPTEN_WEBAUDIO_T audioContext, const WebAudioWorkletProcessorCreateOptions *options, EmscriptenWorkletProcessorCreatedCallback callback, void *userData);
+
+typedef int EMSCRIPTEN_AUDIO_WORKLET_NODE_T;
+
+typedef struct AudioSampleFrame
+{
+  const int numberOfChannels;
+  // An array of numberOfChannels*128 elements, where data[channelIndex*128+i] locates the data of the i'th sample of channel channelIndex.
+  float *data;
+} AudioSampleFrame;
+
+typedef struct AudioParamFrame
+{
+  // Specifies the length of the input array data (in float elements). This is guaranteed to have
+  // a value of either 1 or 128, depending on whether the audio parameter changed during this frame.
+  int length;
+  // An array of the length specified in 'length'.
+  float *data;
+} AudioParamFrame;
+
+typedef EM_BOOL (*EmscriptenWorkletNodeProcessCallback)(int numInputs, const AudioSampleFrame *inputs, int numOutputs, AudioSampleFrame *outputs, int numParams, const AudioParamFrame *params, void *userData);
+
+typedef struct EmscriptenAudioWorkletNodeCreateOptions
+{
+  // How many audio nodes does this node take inputs from? Default = 1
+  int numberOfInputs;
+  // How many audio nodes does this node output to? Default = 1
+  int numberOfOutputs;
+  // For each output, specifies the number of audio channels (1=mono/2=stereo/etc.) for that output. Default = an array of ones, one for each output.
+  int *outputChannelCounts;
+} EmscriptenAudioWorkletNodeCreateOptions;
+
+// Instantiates the given AudioWorkletProcessor as an AudioWorkletNode, which continuously calls the specified processCallback() function on the browser's audio thread to perform audio processing.
+EMSCRIPTEN_AUDIO_WORKLET_NODE_T emscripten_create_wasm_audio_worklet_node(EMSCRIPTEN_WEBAUDIO_T audioContext, const char *name, const EmscriptenAudioWorkletNodeCreateOptions *options, EmscriptenWorkletNodeProcessCallback processCallback, void *userData);
+
+#ifdef __cplusplus
+} // ~extern "C"
+#endif
diff --git a/tests/webaudio/audioworklet.c b/tests/webaudio/audioworklet.c
new file mode 100644
index 0000000000000..61e41ddd7dd05
--- /dev/null
+++ b/tests/webaudio/audioworklet.c
@@ -0,0 +1,90 @@
+#include <emscripten/webaudio.h>
+#include <stdlib.h>
+#include <time.h>
+
+/* Steps to use Wasm-based AudioWorklets:
+  1. Create a Web Audio AudioContext either via manual JS code and calling emscriptenRegisterAudioObject() from JS, or by calling emscripten_create_audio_context() (shown in this sample)
+  2. Initialize a Wasm AudioWorklet scope on the audio context by calling emscripten_start_wasm_audio_worklet_thread_async(). This shares the Wasm Module, Memory, etc. to the AudioWorklet scope,
+     and establishes the stack space for the Audio Worklet.
+     This needs to be called exactly once during the page's lifetime. There is no mechanism in Web Audio to shut down/uninitialize the scope.
+  3. Create one or more Audio Worklet Processors with the desired name and AudioParam configuration.
+  4. Instantiate Web Audio graph nodes from the worklet processors created above, specifying the desired input-output configurations and Wasm-side function callbacks to call for each node.
+  5. Add the graph nodes to the Web Audio graph, and the audio callbacks should begin to fire.
+*/
+
+// This function will be called for each fixed-size block of 128 samples of audio to be processed.
+EM_BOOL ProcessAudio(int numInputs, const AudioSampleFrame *inputs, int numOutputs, AudioSampleFrame *outputs, int numParams, const AudioParamFrame *params, void *userData)
+{
+  // Produce noise in all output channels.
+  for(int i = 0; i < numOutputs; ++i)
+    for(int j = 0; j < 128*outputs[i].numberOfChannels; ++j)
+      outputs[i].data[j] = (rand() / (float)RAND_MAX * 2.0f - 1.0f) * 0.3f;
+
+  // We generated audio and want to keep this processor going. Return EM_FALSE here to shut down.
+  return EM_TRUE;
+}
+
+// This callback will fire after the Audio Worklet Processor has finished being added to the Worklet global scope.
+void AudioWorkletProcessorCreated(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void *userData)
+{
+  if (!success) return;
+
+  // Specify the input and output node configurations for the Wasm Audio Worklet. A simple setup with a single mono output channel here, and no inputs.
+  int outputChannelCounts[1] = { 1 };
+
+  EmscriptenAudioWorkletNodeCreateOptions options = {
+    .numberOfInputs = 0,
+    .numberOfOutputs = 1,
+    .outputChannelCounts = outputChannelCounts
+  };
+
+  // Instantiate the noise-generator Audio Worklet Processor.
+  EMSCRIPTEN_AUDIO_WORKLET_NODE_T wasmAudioWorklet = emscripten_create_wasm_audio_worklet_node(audioContext, "noise-generator", &options, &ProcessAudio, 0);
+
+  EM_ASM({
+    let audioContext = emscriptenGetAudioObject($0);
+
+    // Add a button on the page to toggle playback as a response to user click.
+    let startButton = document.createElement('button');
+    startButton.innerHTML = 'Toggle playback';
+    document.body.appendChild(startButton);
+
+    startButton.onclick = () => {
+      if (audioContext.state != 'running') {
+        audioContext.resume();
+        let audioWorkletNode = emscriptenGetAudioObject($1);
+
+        // Connect the audio worklet node to the graph.
+        audioWorkletNode.connect(audioContext.destination);
+      } else {
+        audioContext.suspend();
+      }
+    };
+  }, audioContext, wasmAudioWorklet);
+}
+
+// This callback will fire when the Wasm Module has been shared with the AudioWorklet global scope, which is then ready to begin adding Audio Worklet Processors.
+void WebAudioWorkletThreadInitialized(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void *userData)
+{
+  if (!success) return;
+
+  WebAudioWorkletProcessorCreateOptions opts = {
+    .name = "noise-generator",
+  };
+  emscripten_create_wasm_audio_worklet_processor_async(audioContext, &opts, AudioWorkletProcessorCreated, 0);
+}
+
+// Define a global stack space for the AudioWorkletGlobalScope. Note that all AudioWorkletProcessors and/or AudioWorkletNodes on the given Audio Context all share the same AudioWorkletGlobalScope,
+// i.e. they all run on the same one audio thread (multiple nodes/processors do not each get their own thread). Hence one stack is enough.
+uint8_t wasmAudioWorkletStack[4096];
+
+int main()
+{
+  srand(time(NULL));
+
+  // Create an audio context
+  EMSCRIPTEN_WEBAUDIO_T context = emscripten_create_audio_context(0 /* use default constructor options */);
+
+  // and kick off Audio Worklet scope initialization, which shares the Wasm Module and Memory to the AudioWorklet scope and initializes its stack.
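+  // (Note: the worklet keeps using this stack memory for as long as it runs, so the buffer
+  // passed here must stay alive; a global array, as above, satisfies that. It must also be
+  // large enough for the input/output/param data marshalled per 128-sample render quantum.)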
+  emscripten_start_wasm_audio_worklet_thread_async(context, wasmAudioWorkletStack, sizeof(wasmAudioWorkletStack), WebAudioWorkletThreadInitialized, 0);
+}
diff --git a/tests/webaudio/create_webaudio.c b/tests/webaudio/create_webaudio.c
new file mode 100644
index 0000000000000..c149fb97f3011
--- /dev/null
+++ b/tests/webaudio/create_webaudio.c
@@ -0,0 +1,31 @@
+#include <emscripten/webaudio.h>
+
+// This code shows a simple example of how to create a Web Audio context from C/C++ code using the webaudio.h API,
+// and how to add a pure sine wave tone generator to it.
+
+int main()
+{
+  EMSCRIPTEN_WEBAUDIO_T context = emscripten_create_audio_context(0 /* use default constructor options */);
+
+  // Illustrate how this handle can be passed to JS code (e.g. to a JS library function, an EM_ASM or an EM_JS block)
+  EM_ASM({
+    var audioContext = emscriptenGetAudioObject($0);
+
+    var oscillator = audioContext.createOscillator();
+    oscillator.connect(audioContext.destination);
+    oscillator.start();
+
+    // Add a button on the page to toggle playback as a response to user click.
+    var startButton = document.createElement('button');
+    startButton.innerHTML = 'Toggle playback';
+    document.body.appendChild(startButton);
+
+    startButton.onclick = () => {
+      if (audioContext.state != 'running') {
+        audioContext.resume();
+      } else {
+        audioContext.suspend();
+      }
+    };
+  }, context);
+}
diff --git a/tests/webaudio/tone_generator.c b/tests/webaudio/tone_generator.c
new file mode 100644
index 0000000000000..f456193ddb68d
--- /dev/null
+++ b/tests/webaudio/tone_generator.c
@@ -0,0 +1,125 @@
+#include <emscripten/webaudio.h>
+#include <emscripten/em_math.h>
+
+// This program tests that sharing the WebAssembly Memory works between the audio generator thread and the main browser UI thread.
+// Two sliders, frequency and volume, can be adjusted on the HTML page, and the audio thread generates a sine wave tone based on
+// these parameters.
+
+// Implement a smooth transition between the UI values and the values that the audio callback is actually processing, to avoid crackling when the user adjusts the sliders.
+float targetToneFrequency = 440.0f; // [shared variable between main thread and audio thread]
+float targetVolume = 0.3f; // [shared variable between main thread and audio thread]
+
+#define SAMPLE_RATE 48000
+#define PI 3.14159265359
+
+float phase = 0.f; // [local variable to the audio thread]
+float phaseIncrement = 440 * 2.f * PI / SAMPLE_RATE; // [local variable to the audio thread]
+float currentVolume = 0.3; // [local variable to the audio thread]
+
+// This function will be called for each fixed-size block of 128 samples of audio to be processed.
+EM_BOOL ProcessAudio(int numInputs, const AudioSampleFrame *inputs, int numOutputs, AudioSampleFrame *outputs, int numParams, const AudioParamFrame *params, void *userData)
+{
+  // Interpolate towards the target frequency and volume values.
+  float targetPhaseIncrement = targetToneFrequency * 2.f * PI / SAMPLE_RATE;
+  phaseIncrement = phaseIncrement * 0.95f + 0.05f * targetPhaseIncrement;
+  currentVolume = currentVolume * 0.95f + 0.05f * targetVolume;
+
+  // Produce a sine wave tone of the desired frequency to all output channels.
+  for(int o = 0; o < numOutputs; ++o)
+    for(int i = 0; i < 128; ++i)
+    {
+      float s = emscripten_math_sin(phase);
+      phase += phaseIncrement;
+      for(int ch = 0; ch < outputs[o].numberOfChannels; ++ch)
+        outputs[o].data[ch*128 + i] = s * currentVolume;
+    }
+
+  // Range reduce to keep precision around zero.
+  phase = emscripten_math_fmod(phase, 2.f * PI);
+
+  // We generated audio and want to keep this processor going. Return EM_FALSE here to shut down.
+  return EM_TRUE;
+}
+
+// This callback will fire after the Audio Worklet Processor has finished being added to the Worklet global scope.
+void AudioWorkletProcessorCreated(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void *userData)
+{
+  if (!success) return;
+
+  // Specify the input and output node configurations for the Wasm Audio Worklet. A simple setup with a single mono output channel here, and no inputs.
+  int outputChannelCounts[1] = { 1 };
+
+  EmscriptenAudioWorkletNodeCreateOptions options = {
+    .numberOfInputs = 0,
+    .numberOfOutputs = 1,
+    .outputChannelCounts = outputChannelCounts
+  };
+
+  // Instantiate the tone-generator Audio Worklet Processor.
+  EMSCRIPTEN_AUDIO_WORKLET_NODE_T wasmAudioWorklet = emscripten_create_wasm_audio_worklet_node(audioContext, "tone-generator", &options, &ProcessAudio, 0);
+
+  EM_ASM({
+    let audioContext = emscriptenGetAudioObject($0);
+
+    // Add a button on the page to toggle playback as a response to user click.
+    let startButton = document.createElement('button');
+    startButton.innerHTML = 'Toggle playback';
+    document.body.appendChild(startButton);
+
+    startButton.onclick = () => {
+      if (audioContext.state != 'running') {
+        audioContext.resume();
+        let audioWorkletNode = emscriptenGetAudioObject($1);
+
+        // Connect the audio worklet node to the graph.
+        audioWorkletNode.connect(audioContext.destination);
+      } else {
+        audioContext.suspend();
+      }
+    };
+  }, audioContext, wasmAudioWorklet);
+}
+
+// This callback will fire when the Wasm Module has been shared with the AudioWorklet global scope, which is then ready to begin adding Audio Worklet Processors.
+void WebAudioWorkletThreadInitialized(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void *userData)
+{
+  if (!success) return;
+
+  WebAudioWorkletProcessorCreateOptions opts = {
+    .name = "tone-generator",
+  };
+  emscripten_create_wasm_audio_worklet_processor_async(audioContext, &opts, AudioWorkletProcessorCreated, 0);
+}
+
+// Define a global stack space for the AudioWorkletGlobalScope. Note that all AudioWorkletProcessors and/or AudioWorkletNodes on the given Audio Context all share the same AudioWorkletGlobalScope,
+// i.e. they all run on the same one audio thread (multiple nodes/processors do not each get their own thread). Hence one stack is enough.
+uint8_t wasmAudioWorkletStack[4096];
+
+int main()
+{
+  // Add UI sliders to the page to adjust the pitch and volume of the tone.
+  EM_ASM({
+    let div = document.createElement('div');
+    div.innerHTML = 'Choose frequency: <input type="range" min="20" max="10000" value="440" id="pitch"> <span id="pitchValue">440</span><br>' +
+                    'Choose volume: <input type="range" min="0" max="100" value="30" id="volume"> <span id="volumeValue">30%</span><br>';
+    document.body.appendChild(div);
+    document.querySelector('#pitch').oninput = (e) => {
+      document.querySelector('#pitchValue').innerHTML = HEAPF32[$0>>2] = parseInt(e.target.value);
+    };
+    document.querySelector('#volume').oninput = (e) => {
+      HEAPF32[$1>>2] = parseInt(e.target.value) / 100;
+      document.querySelector('#volumeValue').innerHTML = parseInt(e.target.value) + '%';
+    };
+  }, &targetToneFrequency, &targetVolume);
+
+  // Create an audio context
+  EmscriptenWebAudioCreateAttributes attrs = {
+    .latencyHint = "interactive",
+    .sampleRate = SAMPLE_RATE
+  };
+
+  EMSCRIPTEN_WEBAUDIO_T context = emscripten_create_audio_context(&attrs);
+
+  // and kick off Audio Worklet scope initialization, which shares the Wasm Module and Memory to the AudioWorklet scope and initializes its stack.
+  emscripten_start_wasm_audio_worklet_thread_async(context, wasmAudioWorkletStack, sizeof(wasmAudioWorkletStack), WebAudioWorkletThreadInitialized, 0);
+}
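
--
A note on building the samples (the exact flag set is an assumption, since the patch itself does not spell it out): the new mode is switched on with the -s AUDIO_WORKLET=1 setting added in src/settings.js, and since only the MINIMAL_RUNTIME shell/preamble/postamble files are adapted, a MINIMAL_RUNTIME build appears to be expected. A plausible build line for the tone generator sample:

  emcc tests/webaudio/tone_generator.c -O2 -s MINIMAL_RUNTIME=1 -s AUDIO_WORKLET=1 -o tone_generator.html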