diff --git a/.cargo/config b/.cargo/config.toml similarity index 100% rename from .cargo/config rename to .cargo/config.toml diff --git a/.gitignore b/.gitignore index 8e5ba363..b61b5494 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,15 @@ NOTES.md /generator/src .DS_Store +# Working files +TODOS.md +NOTES.md +run-wpt.* +all-check.sh +tests/junk.mjs +yarn.lock +issues + # Created by https://www.toptal.com/developers/gitignore/api/node # Edit at https://www.toptal.com/developers/gitignore?templates=node @@ -131,10 +140,3 @@ Cargo.lock *.env -# Working files -TODOS.md -NOTES.md -run-wpt.* -all-check.sh -tests/junk.mjs -yarn.lock diff --git a/Cargo.toml b/Cargo.toml index bdd4f1f6..23b20c83 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,8 +11,8 @@ crate-type = ["cdylib"] [dependencies] crossbeam-channel = "0.5.12" -napi = { version="2.15", features=["napi9", "tokio_rt"] } -napi-derive = { version="2.15" } +napi = { version="2.16", features=["napi9", "tokio_rt"] } +napi-derive = { version="2.16" } thread-priority = "1.1.0" web-audio-api = "=0.45" # web-audio-api = { path = "../web-audio-api-rs" } diff --git a/generator/rs/lib.tmpl.rs b/generator/rs/lib.tmpl.rs index 4340668f..aa4b5f21 100644 --- a/generator/rs/lib.tmpl.rs +++ b/generator/rs/lib.tmpl.rs @@ -1,4 +1,6 @@ #![deny(clippy::all)] +// @todo - properly fix this clippy issue +#![allow(clippy::zero_repeat_side_effects)] use napi::{Env, JsObject, Result}; use napi_derive::module_exports; diff --git a/js/AudioContext.js b/js/AudioContext.js index 9a7968f4..077671aa 100644 --- a/js/AudioContext.js +++ b/js/AudioContext.js @@ -104,7 +104,7 @@ module.exports = function(jsExport, nativeBinding) { // object after its instantiation, and that we don't have any initial `resume` call. 
this[kNapiObj].listen_to_events(); - // @todo - check if this is still required + // @todo - This is probably not required anymore as the event listeners // prevent garbage collection const id = contextId++; // store in process to prevent garbage collection diff --git a/js/AudioWorklet.js b/js/AudioWorklet.js index 8d9268dc..fdc579f2 100644 --- a/js/AudioWorklet.js +++ b/js/AudioWorklet.js @@ -64,7 +64,7 @@ const resolveModule = async (moduleUrl) => { // get caller site from error stack trace const callerSite = caller(2); - if (callerSite.startsWith('http')) { + if (callerSite.startsWith('http')) { // this branch exists for wpt where caller site is an url let url; // handle origin relative and caller path relative URLs if (moduleUrl.startsWith('/')) { diff --git a/js/OfflineAudioContext.js b/js/OfflineAudioContext.js index 60bcd317..6c85971e 100644 --- a/js/OfflineAudioContext.js +++ b/js/OfflineAudioContext.js @@ -82,24 +82,26 @@ module.exports = function patchOfflineAudioContext(jsExport, nativeBinding) { // Add function to Napi object to bridge from Rust events to JS EventTarget // They will be effectively registered on rust side when `startRendering` is called - this[kNapiObj][kOnStateChange] = (function(err, rawEvent) { + this[kNapiObj][kOnStateChange] = (function(_err, rawEvent) { const event = new Event(rawEvent.type); propagateEvent(this, event); }).bind(this); // This event is, per spec, the last trigerred one this[kNapiObj][kOnComplete] = (function(err, rawEvent) { - // workaround the fact that this event seems to be triggered before + // workaround the fact that the oncomplete event is triggered before // startRendering fulfills and that we want to return the exact same instance - if (this.#renderedBuffer === null) { - this.#renderedBuffer = new jsExport.AudioBuffer({ [kNapiObj]: rawEvent.renderedBuffer }); - } + this.#renderedBuffer = new jsExport.AudioBuffer({ [kNapiObj]: rawEvent.renderedBuffer }); const event = new
jsExport.OfflineAudioCompletionEvent(rawEvent.type, { renderedBuffer: this.#renderedBuffer, }); - propagateEvent(this, event); + // delay event propagation to next tick so that it is executed after startRendering fulfills + setTimeout(() => { + propagateEvent(this, event); + this.#renderedBuffer = null; + }, 0); }).bind(this); } @@ -145,15 +147,9 @@ module.exports = function patchOfflineAudioContext(jsExport, nativeBinding) { throwSanitizedError(err); } - // release audio worklet, if any + // release audio worklets await this.audioWorklet[kWorkletRelease](); - // workaround the fact that this event seems to be triggered before - // startRendering fulfills and that we want to return the exact same instance - if (this.#renderedBuffer === null) { - this.#renderedBuffer = new jsExport.AudioBuffer({ [kNapiObj]: nativeAudioBuffer }); - } - return this.#renderedBuffer; } diff --git a/package.json b/package.json index 77acce9b..d499179c 100644 --- a/package.json +++ b/package.json @@ -44,7 +44,8 @@ "lint": "npx eslint index.cjs index.mjs && npx eslint js/*.js && npx eslint examples/*.mjs", "preversion": "yarn install && npm run generate", "postversion": "cargo bump $npm_package_version && git commit -am \"v$npm_package_version\" && node .scripts/check-changelog.mjs", - "test": "mocha tests", + "test": "mocha tests/*.spec.js", + "test:only": "mocha", "wpt": "npm run build && node ./.scripts/wpt-harness.mjs", "wpt:only": "node ./.scripts/wpt-harness.mjs" }, diff --git a/src/audio_buffer.rs b/src/audio_buffer.rs index 3a0e78f2..d45de379 100644 --- a/src/audio_buffer.rs +++ b/src/audio_buffer.rs @@ -5,6 +5,13 @@ use web_audio_api::{AudioBuffer, AudioBufferOptions}; pub(crate) struct NapiAudioBuffer(Option); +// for debug purpose +// impl Drop for NapiAudioBuffer { +// fn drop(&mut self) { +// println!("NAPI: NapiAudioBuffer dropped"); +// } +// } + impl NapiAudioBuffer { pub fn create_js_class(env: &Env) -> Result { env.define_class( diff --git a/src/audio_context.rs
b/src/audio_context.rs index 94d7f7f9..2cce1a2b 100644 --- a/src/audio_context.rs +++ b/src/audio_context.rs @@ -252,10 +252,18 @@ fn listen_to_events(ctx: CallContext) -> Result { let k_onstatechange = crate::utils::get_symbol_for(ctx.env, "node-web-audio-api:onstatechange"); let statechange_cb = js_this.get_property(k_onstatechange).unwrap(); + let context_clone = Arc::clone(&napi_context.0); + let mut statechange_tsfn = ctx.env.create_threadsafe_function( &statechange_cb, 0, - |ctx: ThreadSafeCallContext| { + move |ctx: ThreadSafeCallContext| { + if context_clone.state() == AudioContextState::Closed { + // clear all context listeners, so that it can be garbage collected + context_clone.clear_onsinkchange(); + context_clone.clear_onstatechange(); + } + let mut event = ctx.env.create_object()?; let event_type = ctx.env.create_string(ctx.value.type_)?; event.set_named_property("type", event_type)?; diff --git a/src/audio_worklet_node.rs b/src/audio_worklet_node.rs index 8867e93d..cc98c746 100644 --- a/src/audio_worklet_node.rs +++ b/src/audio_worklet_node.rs @@ -426,7 +426,7 @@ fn process_audio_worklet(env: &Env, processors: &JsObject, args: ProcessorArgume let queue_task = processor.get_property::(k_worklet_queue_task)?; let js_cmd = env.create_string(&cmd)?; let js_err = env.create_error(err)?; - queue_task.apply2(processor, js_cmd, js_err)?; + let _: Result = queue_task.apply2(processor, js_cmd, js_err); } Ok(()) diff --git a/src/lib.rs b/src/lib.rs index 6f486491..0f6684eb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -18,6 +18,8 @@ // -------------------------------------------------------------------------- // #![deny(clippy::all)] +// @todo - properly fix this clippy issue (really don't understand the problem...) 
+#![allow(clippy::zero_repeat_side_effects)] use napi::{Env, JsObject, Result}; use napi_derive::module_exports; diff --git a/src/offline_audio_context.rs b/src/offline_audio_context.rs index ba658dde..3db04b7a 100644 --- a/src/offline_audio_context.rs +++ b/src/offline_audio_context.rs @@ -13,7 +13,7 @@ use crate::*; #[derive(Clone)] pub(crate) struct NapiOfflineAudioContext(Arc, usize); -// for debug purpose +// // for debug purpose // impl Drop for NapiOfflineAudioContext { // fn drop(&mut self) { // println!("NAPI: NapiOfflineAudioContext dropped"); // } // } @@ -115,21 +115,34 @@ fn start_rendering(ctx: CallContext) -> Result { }, )?; + // unref tsfn so they do not prevent the process to exit + let _ = statechange_tsfn.unref(ctx.env); + + context.set_onstatechange(move |e| { + statechange_tsfn.call(Ok(e), ThreadsafeFunctionCallMode::Blocking); + }); + let k_oncomplete = crate::utils::get_symbol_for(ctx.env, "node-web-audio-api:oncomplete"); let complete_cb = js_this.get_property(k_oncomplete).unwrap(); + let context_clone = Arc::clone(&napi_context.0); + let mut complete_tsfn = ctx.env.create_threadsafe_function( &complete_cb, 0, - |ctx: ThreadSafeCallContext| { + move |ctx: ThreadSafeCallContext| { + // This is the last event that can be triggered by this context, we can + // clear the listeners so that the context can be properly garbage collected + context_clone.clear_onstatechange(); + context_clone.clear_oncomplete(); + let raw_event = ctx.value; let mut event = ctx.env.create_object()?; let event_type = ctx.env.create_string("complete")?; event.set_named_property("type", event_type)?; - // @fixme: this event is propagated before `startRedering` fulfills - // which is probaly wrong, so let's propagate the JS audio buffer - // and let JS handle the race condition + // This event is propagated before `startRendering` fulfills + // which is wrong, order is fixed on JS side.
let ctor = crate::utils::get_class_ctor(&ctx.env, "AudioBuffer")?; let js_audio_buffer = ctor.new_instance(&[ctx.env.get_null()?])?; let napi_audio_buffer = ctx.env.unwrap::(&js_audio_buffer)?; @@ -142,13 +155,8 @@ fn start_rendering(ctx: CallContext) -> Result { )?; // unref tsfn so they do not prevent the process to exit - let _ = statechange_tsfn.unref(ctx.env); let _ = complete_tsfn.unref(ctx.env); - context.set_onstatechange(move |e| { - statechange_tsfn.call(Ok(e), ThreadsafeFunctionCallMode::Blocking); - }); - context.set_oncomplete(move |e| { complete_tsfn.call(Ok(e), ThreadsafeFunctionCallMode::Blocking); }); diff --git a/tests/OfflineAudioContext.spec.mjs b/tests/OfflineAudioContext.spec.mjs index 4a515d5b..f4d2006e 100644 --- a/tests/OfflineAudioContext.spec.mjs +++ b/tests/OfflineAudioContext.spec.mjs @@ -1,18 +1,24 @@ import { assert } from 'chai'; import { AudioContext, + AudioBuffer, OfflineAudioContext, } from '../index.mjs'; describe('# OfflineAudioContext', () => { describe('## await startRendering()', () => { - it('buffer returned by startRedring and buffer from complete should be same instance', async () => { + it('buffer returned by startRendering and buffer from `oncomplete` event should be same instance', async () => { const offline = new OfflineAudioContext(1, 48000, 48000); let aResult = null; let bResult = null; + let renderingEnded = false; - offline.addEventListener('complete', (e) => aResult = e.renderedBuffer); + offline.addEventListener('complete', (e) => { + // check that the complete event is triggered after startRendering fulfills + assert.isTrue(renderingEnded); + aResult = e.renderedBuffer; + }); const osc = offline.createOscillator(); osc.connect(offline.destination); @@ -21,9 +27,12 @@ describe('# OfflineAudioContext', () => { osc.stop(1.); bResult = await offline.startRendering(); + renderingEnded = true; // make sure we received the event await new Promise(resolve => setTimeout(resolve, 100)); + assert.isTrue(aResult 
instanceof AudioBuffer); + assert.isTrue(bResult instanceof AudioBuffer); assert.deepEqual(aResult, bResult); }); });