diff --git a/README.md b/README.md
index 09fe855..3a55985 100644
--- a/README.md
+++ b/README.md
@@ -183,11 +183,11 @@ speaker.start()
 Write frames of audio:
 
 ```typescript
-function getNextAudioFrame(): Uint8Array | Int16Array | Int32Array {
+function getNextAudioFrame(): ArrayBuffer {
   //
 }
 
-speaker.writeSync(getNextAudioFrame())
+speaker.write(getNextAudioFrame())
 ```
 
 To stop recording, call `stop()` on the instance:
diff --git a/binding/nodejs/README.md b/binding/nodejs/README.md
index 8abf3a3..1c51f5c 100644
--- a/binding/nodejs/README.md
+++ b/binding/nodejs/README.md
@@ -24,9 +24,9 @@ const { PvSpeaker } = require("@picovoice/pvspeaker-node");
 
 const sampleRate = 22050;
 const bitsPerSample = 16;
-const recorder = new PvSpeaker(sampleRate, bitsPerSample);
+const speaker = new PvSpeaker(sampleRate, bitsPerSample);
 
-recorder.start()
+speaker.start()
 ```
 
 (or)
@@ -42,19 +42,19 @@ const devices = PvSpeaker.getAvailableDevices()
 const sampleRate = 22050;
 const bitsPerSample = 16;
 const deviceIndex = 0;
-const recorder = new PvSpeaker(sampleRate, bitsPerSample, deviceIndex);
+const speaker = new PvSpeaker(sampleRate, bitsPerSample, deviceIndex);
 
-recorder.start()
+speaker.start()
 ```
 
 Write frames of audio:
 
 ```typescript
-function getNextAudioFrame(): Uint8Array | Int16Array | Int32Array {
+function getNextAudioFrame(): ArrayBuffer {
   //
 }
 
-speaker.writeSync(getNextAudioFrame())
+speaker.write(getNextAudioFrame())
 ```
 
 To stop recording, call `stop()` on the instance:
diff --git a/binding/nodejs/src/pv_speaker.ts b/binding/nodejs/src/pv_speaker.ts
index e10b2a4..587ae90 100644
--- a/binding/nodejs/src/pv_speaker.ts
+++ b/binding/nodejs/src/pv_speaker.ts
@@ -48,9 +48,6 @@ class PvSpeaker {
     frameLength: number = 512,
     bufferedFramesCount = 50,
   ) {
-    if (bitsPerSample === 24 || (bitsPerSample !== 8 && bitsPerSample !== 16 && bitsPerSample !== 32)) {
-      throw pvSpeakerStatusToException(PvSpeakerStatus.INVALID_ARGUMENT, `Invalid bits per sample: ${bitsPerSample}`);
-    }
     let pvSpeakerHandleAndStatus;
     try {
       pvSpeakerHandleAndStatus = PvSpeaker._pvSpeaker.init(
@@ -105,7 +102,7 @@ class PvSpeaker {
   }
 
   /**
-   * Starts the audio output device. After starting, pcm frames can be sent to the audio output device via `write` or `writeSync`.
+   * Starts the audio output device. After starting, pcm frames can be sent to the audio output device via `write`.
    */
   public start(): void {
     const status = PvSpeaker._pvSpeaker.start(this._handle);
@@ -124,59 +121,26 @@ class PvSpeaker {
     }
   }
 
-  private _handlePcm(pcm: Uint8Array | Int16Array | Int32Array): void {
-    if (pcm instanceof Uint8Array && this._bitsPerSample !== 8) {
-      throw pvSpeakerStatusToException(
-        PvSpeakerStatus.INVALID_ARGUMENT,
-        `Expected 8 bits per sample for Uint8Array, but PvSpeaker was initialized with ${this.bitsPerSample} bits per sample.`);
-    }
-    if (pcm instanceof Int16Array && this._bitsPerSample !== 16) {
-      throw pvSpeakerStatusToException(
-        PvSpeakerStatus.INVALID_ARGUMENT,
-        `Expected 16 bits per sample for Int16Array, but PvSpeaker was initialized with ${this.bitsPerSample} bits per sample.`);
-    }
-    if (pcm instanceof Int32Array && this._bitsPerSample !== 32) {
-      throw pvSpeakerStatusToException(
-        PvSpeakerStatus.INVALID_ARGUMENT,
-        `Expected 32 bits per sample for Int32Array, but PvSpeaker was initialized with ${this.bitsPerSample} bits per sample.`);
-    }
-
+  /**
+   * Synchronous call to write a frame of audio data.
+   *
+   * @returns {Boolean}
+   */
+  public write(pcm: ArrayBuffer): void {
     let i = 0;
-    while (i < pcm.length) {
-      const isLastFrame = i + this.frameLength >= pcm.length;
-      const writeFrameLength = isLastFrame ? pcm.length - i : this.frameLength;
+    const frameLength = this._frameLength * this._bitsPerSample / 8;
+    while (i < pcm.byteLength) {
+      const isLastFrame = i + frameLength >= pcm.byteLength;
+      const writeFrameLength = isLastFrame ? pcm.byteLength - i : frameLength;
       const frame = pcm.slice(i, i + writeFrameLength);
-
-      const status = PvSpeaker._pvSpeaker.write(this._handle, frame);
+      const status = PvSpeaker._pvSpeaker.write(this._handle, this._bitsPerSample, frame);
       if (status !== PvSpeakerStatus.SUCCESS) {
        throw pvSpeakerStatusToException(status, "PvSpeaker failed to write audio data frame.");
       }
-      i += this.frameLength;
+      i += frameLength;
     }
   }
 
-  /**
-   * Asynchronous call to write a frame of audio data.
-   *
-   * @returns {Promise}
-   */
-  public async write(pcm: Uint8Array | Int16Array | Int32Array): Promise<void> {
-    return new Promise(resolve => {
-      setTimeout(() => {
-        this._handlePcm(pcm);
-        resolve();
-      });
-    });
-  }
-
-  /**
-   * Synchronous call to write a frame of audio data.
-   *
-   * @returns {Boolean}
-   */
-  public writeSync(pcm: Uint8Array | Int16Array | Int32Array): void {
-    this._handlePcm(pcm);
-  }
 
   /**
    * Enable or disable debug logging for PvSpeaker. Debug logs will indicate when there are overflows in the internal
diff --git a/binding/nodejs/test/pv_speaker.test.ts b/binding/nodejs/test/pv_speaker.test.ts
index 2d9cc65..ab8e9a7 100644
--- a/binding/nodejs/test/pv_speaker.test.ts
+++ b/binding/nodejs/test/pv_speaker.test.ts
@@ -29,42 +29,13 @@ describe("Test PvSpeaker", () => {
     expect(f).toThrow(Error);
   });
 
-  test("start stop (Int16Array)", async () => {
+  test("start stop", async () => {
     const speaker = new PvSpeaker(SAMPLE_RATE, BITS_PER_SAMPLE);
     speaker.start();
 
     const f = async () => {
-      const frames = new Int16Array(FRAME_LENGTH * 2);
-      speaker.writeSync(frames);
-      await speaker.write(frames);
-      speaker.release();
-    }
-
-    expect(f).not.toThrow(Error);
-  });
-
-  test("start stop (Uint8Array)", async () => {
-    const speaker = new PvSpeaker(SAMPLE_RATE, 8);
-    speaker.start();
-
-    const f = async () => {
-      const frames = new Uint8Array(FRAME_LENGTH * 2);
-      speaker.writeSync(frames);
-      await speaker.write(frames);
-      speaker.release();
-    }
-
-    expect(f).not.toThrow(Error);
-  });
-
-  test("start stop (Int32Array)", async () => {
-    const speaker = new PvSpeaker(SAMPLE_RATE, 32);
-    speaker.start();
-
-    const f = async () => {
-      const frames = new Int32Array(FRAME_LENGTH * 2);
-      speaker.writeSync(frames);
-      await speaker.write(frames);
+      const frames = new Int16Array(FRAME_LENGTH * 2).buffer;
+      speaker.write(frames);
       speaker.release();
     }
 
diff --git a/demo/nodejs/demo.js b/demo/nodejs/demo.js
index bfa8f51..aad094a 100644
--- a/demo/nodejs/demo.js
+++ b/demo/nodejs/demo.js
@@ -46,49 +46,32 @@ async function runDemo() {
       console.log(`index: ${i}, device name: ${devices[i]}`)
     }
   } else {
-    const buffer = fs.readFileSync(inputWavPath);
+    const wavBuffer = fs.readFileSync(inputWavPath);
 
-    if (buffer.toString('utf8', 0, 4) !== 'RIFF' || buffer.toString('utf8', 8, 12) !== 'WAVE') {
+    if (wavBuffer.toString('utf8', 0, 4) !== 'RIFF' || wavBuffer.toString('utf8', 8, 12) !== 'WAVE') {
       throw new Error('Invalid WAV file');
     }
 
-    const formatChunkOffset = buffer.indexOf('fmt ', 12);
+    const formatChunkOffset = wavBuffer.indexOf('fmt ', 12);
     if (formatChunkOffset === -1) {
       throw new Error('Invalid WAV file: fmt chunk not found');
     }
 
-    const numChannels = buffer.readUInt16LE(formatChunkOffset + 10);
-    const sampleRate = buffer.readUInt32LE(formatChunkOffset + 12);
-    const bitsPerSample = buffer.readUInt16LE(formatChunkOffset + 22);
+    const numChannels = wavBuffer.readUInt16LE(formatChunkOffset + 10);
+    const sampleRate = wavBuffer.readUInt32LE(formatChunkOffset + 12);
+    const bitsPerSample = wavBuffer.readUInt16LE(formatChunkOffset + 22);
 
     if (numChannels !== 1) {
       throw new Error('WAV file must have a single channel (MONO)');
     }
 
-    const dataChunkOffset = buffer.indexOf('data', formatChunkOffset + 24);
+    const dataChunkOffset = wavBuffer.indexOf('data', formatChunkOffset + 24);
     if (dataChunkOffset === -1) {
       throw new Error('Invalid WAV file: data chunk not found');
     }
 
-    const dataChunkSize = buffer.readUInt32LE(dataChunkOffset + 4);
-    const pcmDataStart = dataChunkOffset + 8;
-    const pcmDataEnd = pcmDataStart + dataChunkSize;
-    const pcmDataBuffer = buffer.subarray(pcmDataStart, pcmDataEnd);
-
-    let pcm;
-    switch (bitsPerSample) {
-      case 8:
-        pcm = new Uint8Array(pcmDataBuffer);
-        break;
-      case 16:
-        pcm = new Int16Array(pcmDataBuffer.buffer, pcmDataBuffer.byteOffset, pcmDataBuffer.byteLength / 2);
-        break;
-      case 32:
-        pcm = new Int32Array(pcmDataBuffer.buffer, pcmDataBuffer.byteOffset, pcmDataBuffer.byteLength / 4);
-        break;
-      default:
-        throw new Error(`Unsupported bits per sample: ${bitsPerSample}`);
-    }
+    const headerSize = 44;
+    const pcmBuffer = wavBuffer.buffer.slice(headerSize);
 
     const speaker = new PvSpeaker(sampleRate, bitsPerSample, audioDeviceIndex);
     console.log(`Using PvSpeaker version: ${speaker.version}`);
@@ -98,7 +81,7 @@ async function runDemo() {
 
     console.log("Playing audio...");
     try {
-      speaker.writeSync(pcm);
+      speaker.write(pcmBuffer);
       speaker.stop();
       console.log("Finished playing audio...");
     } catch (e) {
diff --git a/project/Testing/Temporary/CTestCostData.txt b/project/Testing/Temporary/CTestCostData.txt
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/project/Testing/Temporary/CTestCostData.txt
@@ -0,0 +1 @@
+---
diff --git a/project/Testing/Temporary/LastTest.log b/project/Testing/Temporary/LastTest.log
new file mode 100644
index 0000000..7f9a02a
--- /dev/null
+++ b/project/Testing/Temporary/LastTest.log
@@ -0,0 +1,3 @@
+Start testing: Jul 10 13:26 PDT
+----------------------------------------------------------
+End testing: Jul 10 13:26 PDT
diff --git a/project/node/pv_speaker_napi.c b/project/node/pv_speaker_napi.c
index a8e8818..f94e531 100644
--- a/project/node/pv_speaker_napi.c
+++ b/project/node/pv_speaker_napi.c
@@ -232,8 +232,8 @@ napi_value napi_pv_speaker_stop(napi_env env, napi_callback_info info) {
 }
 
 napi_value napi_pv_speaker_write(napi_env env, napi_callback_info info) {
-    size_t argc = 2;
-    napi_value args[2];
+    size_t argc = 3;
+    napi_value args[3];
     napi_status status = napi_get_cb_info(env, info, &argc, args, NULL, NULL);
     if (status != napi_ok) {
         napi_throw_error(
@@ -254,50 +254,38 @@ napi_value napi_pv_speaker_write(napi_env env, napi_callback_info info) {
         return NULL;
     }
 
-    napi_typedarray_type arr_type = -1;
-    size_t length = 0;
-    void* pcm_data = NULL;
-    napi_value arraybuffer = NULL;
-    size_t byte_offset = 0;
-    status = napi_get_typedarray_info(env, args[1], &arr_type, &length, &pcm_data, &arraybuffer, &byte_offset);
+    int32_t bits_per_sample;
+    status = napi_get_value_int32(env, args[1], &bits_per_sample);
     if (status != napi_ok) {
         napi_throw_error(
                 env,
-                pv_speaker_status_to_string(PV_SPEAKER_STATUS_RUNTIME_ERROR),
-                "Unable to get typedarray");
-        return NULL;
-    }
-    if (arr_type != napi_uint8_array && arr_type != napi_int16_array && arr_type != napi_int32_array) {
-        napi_throw_error(
-                env,
-                pv_speaker_status_to_string(PV_SPEAKER_STATUS_RUNTIME_ERROR),
-                "Invalid type of input pcm buffer. The input frame has to be 'Uint8Array', 'Int16Array', or 'Int32Array'");
+                pv_speaker_status_to_string(PV_SPEAKER_STATUS_INVALID_ARGUMENT),
+                "Unable to get the bits per sample");
         return NULL;
     }
 
-    size_t buffer_length = 0;
-    if (arr_type == napi_uint8_array) {
-        buffer_length = length * sizeof(uint8_t);
-    } else if (arr_type == napi_int16_array) {
-        buffer_length = length * sizeof(int16_t);
-    } else if (arr_type == napi_int32_array) {
-        buffer_length = length * sizeof(int32_t);
-    }
-
     void* data = NULL;
-    napi_value res = NULL;
-    status = napi_create_buffer(env, buffer_length, &data, &res);
+    size_t byte_length = 0;
+    status = napi_get_arraybuffer_info(env, args[2], &data, &byte_length);
     if (status != napi_ok) {
         napi_throw_error(
                 env,
                 pv_speaker_status_to_string(PV_SPEAKER_STATUS_RUNTIME_ERROR),
-                "Unable to get frame");
+                "Unable to get buffer");
         return NULL;
     }
-    memcpy(data, pcm_data, buffer_length);
+
+    int32_t bytes_per_sample = 1;
+    if (bits_per_sample == 16) {
+        bytes_per_sample = 2;
+    } else if (bits_per_sample == 24) {
+        bytes_per_sample = 3;
+    } else if (bits_per_sample == 32) {
+        bytes_per_sample = 4;
+    }
 
     pv_speaker_status_t pv_speaker_status = pv_speaker_write(
-            (pv_speaker_t *)(uintptr_t) object_id, (int32_t) length, data);
+            (pv_speaker_t *)(uintptr_t) object_id, (int32_t) (byte_length / bytes_per_sample), (int8_t *) data);
 
     napi_value result;
     status = napi_create_int32(env, pv_speaker_status, &result);