diff --git a/package-lock.json b/package-lock.json
index 740e6963..ee8a1cc2 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -4564,8 +4564,7 @@
       "optional": true,
       "os": [
         "linux"
-      ],
-      "peer": true
+      ]
     },
     "node_modules/@rollup/rollup-linux-arm-musleabihf": {
       "version": "4.21.3",
@@ -5511,11 +5510,10 @@
       }
     },
     "node_modules/@types/audioworklet": {
-      "version": "0.0.58",
-      "resolved": "https://registry.npmjs.org/@types/audioworklet/-/audioworklet-0.0.58.tgz",
-      "integrity": "sha512-uHlows3ykQFfxDdMEcLChlCtVI63OvKCKNViOc7pOeyS8JqqjuzAPcp4Yo2QopnEH4Rh54vLauQZKJRgnrBG/A==",
-      "dev": true,
-      "license": "Apache-2.0"
+      "version": "0.0.60",
+      "resolved": "https://registry.npmjs.org/@types/audioworklet/-/audioworklet-0.0.60.tgz",
+      "integrity": "sha512-BCYklConpVRbPlNVcjzIhRsPWBTbCFSbkfjBC+VLULryaBI1M651y4nK0SMsSuTgtVSrKVY7Y+fsobCjviuDWA==",
+      "dev": true
     },
     "node_modules/@types/babel__core": {
       "version": "7.20.5",
@@ -20097,15 +20095,30 @@
       },
       "devDependencies": {
         "@jspsych/config": "^2.0.0",
-        "@types/audioworklet": "^0.0.58",
+        "@types/audioworklet": "^0.0.60",
         "@types/mustache": "^4.2.5",
         "rollup-plugin-dotenv": "^0.5.1",
-        "rollup-plugin-string-import": "^1.2.4"
+        "rollup-plugin-string-import": "^1.2.4",
+        "typescript": "^5.6.2"
       },
       "peerDependencies": {
         "jspsych": "^8.0.2"
       }
     },
+    "packages/record/node_modules/typescript": {
+      "version": "5.6.2",
+      "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.2.tgz",
+      "integrity": "sha512-NW8ByodCSNCwZeghjN3o+JX5OFH0Ojg6sadjEKY4huZ52TqbJTJnDo5+Tw98lSy63NZvi4n+ez5m2u5d4PkZyw==",
+      "dev": true,
+      "license": "Apache-2.0",
+      "bin": {
+        "tsc": "bin/tsc",
+        "tsserver": "bin/tsserver"
+      },
+      "engines": {
+        "node": ">=14.17"
+      }
+    },
     "packages/style": {
       "name": "@lookit/style",
       "version": "0.0.1",
diff --git a/packages/record/fixtures/MockWebAudioAPI.ts b/packages/record/fixtures/MockWebAudioAPI.ts
new file mode 100644
index 00000000..fb279ba0
--- /dev/null
+++ b/packages/record/fixtures/MockWebAudioAPI.ts
@@ -0,0 +1,82 @@
+/* eslint-disable @typescript-eslint/no-explicit-any */
+
+/** Mock the message port, which is needed by the Audio Worklet Processor. */
+export const msgPort = {
+  addEventListener: jest.fn(),
+  start: jest.fn(),
+  close: jest.fn(),
+  postMessage: jest.fn(),
+  // eslint-disable-next-line jsdoc/require-jsdoc
+  onmessage: jest.fn(),
+} as unknown as MessagePort;
+
+/** Mock for Media Stream Audio Source Node. */
+export class MediaStreamAudioSourceNodeMock {
+  /**
+   * Mock the MediaStreamAudioSourceNode.
+   *
+   * @param _destination - Destination
+   * @returns This
+   */
+  // eslint-disable-next-line @typescript-eslint/no-unused-vars
+  public connect(_destination: any): any {
+    // Return this to support chaining
+    return this;
+  }
+}
+
+/** Mock for Audio Worklet Node */
+export class AudioWorkletNodeMock {
+  /**
+   * Constructor.
+   *
+   * @param _context - Base audio context
+   * @param _name - Name
+   */
+  // eslint-disable-next-line @typescript-eslint/no-unused-vars
+  public constructor(_context: any, _name: string) {
+    this.port = msgPort;
+  }
+  public port: MessagePort;
+  /**
+   * Connect.
+   *
+   * @param _destination - Destination
+   * @returns This
+   */
+  // eslint-disable-next-line @typescript-eslint/no-unused-vars
+  public connect(_destination: any): any {
+    return this;
+  }
+}
+
+// Define a partial mock for the AudioContext
+export const audioContextMock = {
+  audioWorklet: {
+    addModule: jest.fn(async () => await Promise.resolve()),
+  },
+  createBuffer: jest.fn(
+    () =>
+      ({
+        getChannelData: jest.fn(() => new Float32Array(256)),
+      }) as unknown as AudioBuffer,
+  ),
+  createBufferSource: jest.fn(
+    () =>
+      ({
+        connect: jest.fn(),
+      }) as unknown as AudioBufferSourceNode,
+  ),
+  // eslint-disable-next-line @typescript-eslint/no-unused-vars
+  createMediaStreamSource: jest.fn(
+    (_stream: MediaStream) => new MediaStreamAudioSourceNodeMock(),
+  ),
+  sampleRate: 44100,
+  destination: new AudioWorkletNodeMock(null, ""), // Mock destination
+  close: jest.fn(),
+  decodeAudioData: jest.fn(),
+  resume: jest.fn(),
+  suspend: jest.fn(),
+  state: "suspended",
+  onstatechange: null as any,
+} as unknown as AudioContext; // Cast as AudioContext for compatibility
diff --git a/packages/record/jest.config.cjs b/packages/record/jest.config.cjs
index 75374a84..c98edd37 100644
--- a/packages/record/jest.config.cjs
+++ b/packages/record/jest.config.cjs
@@ -1,6 +1,7 @@
 const config = require("../../jest.cjs").makePackageConfig();
 module.exports = {
   ...config,
+  coveragePathIgnorePatterns: ["fixtures"],
   transformIgnorePatterns: ["node_modules/(?!auto-bind)"],
   testEnvironmentOptions: {
     ...config.testEnvironmentOptions,
diff --git a/packages/record/package.json b/packages/record/package.json
index 221703a9..4df3ba24 100644
--- a/packages/record/package.json
+++ b/packages/record/package.json
@@ -21,6 +21,7 @@
   ],
   "scripts": {
     "build": "rollup --config",
+    "buildMicCheck": "npx tsc src/mic_check.ts --target esnext --lib ESNext --types node,audioworklet --skipLibCheck true",
     "dev": "rollup --config rollup.config.dev.mjs --watch",
     "test": "jest --coverage"
   },
@@ -31,10 +32,11 @@
   },
   "devDependencies": {
     "@jspsych/config": "^2.0.0",
-    "@types/audioworklet": "^0.0.58",
+    "@types/audioworklet": "^0.0.60",
     "@types/mustache": "^4.2.5",
     "rollup-plugin-dotenv": "^0.5.1",
-    "rollup-plugin-string-import": "^1.2.4"
+    "rollup-plugin-string-import": "^1.2.4",
+    "typescript": "^5.6.2"
   },
   "peerDependencies": {
     "jspsych": "^8.0.2"
diff --git a/packages/record/src/error.ts b/packages/record/src/error.ts
index 4f748630..9cd2ce55 100644
--- a/packages/record/src/error.ts
+++ b/packages/record/src/error.ts
@@ -62,3 +62,41 @@ export class NoStopPromiseError extends Error {
     this.name = "NoStopPromiseError";
   }
 }
+
+/**
+ * Error thrown when attempting an action that relies on an input stream, such
+ * as the mic volume check, but no such stream is found.
+ */
+export class NoStreamError extends Error {
+  /**
+   * When attempting an action that requires an input stream, such as the mic
+   * check, but no stream is found.
+   */
+  public constructor() {
+    const message =
+      "No input stream found. Maybe the recorder was not initialized with intializeRecorder.";
+    super(message);
+    this.name = "NoStreamError";
+  }
+}
+
+/**
+ * Error thrown if there's a problem setting up the microphone input level
+ * check.
+ */ +export class MicCheckError extends Error { + /** + * Occurs if there's a problem setting up the mic check, including setting up + * the audio context and stream source, loading the audio worklet processor + * script, setting up the port message event handler, and resolving the + * promise chain via message events passed to onMicActivityLevel. + * + * @param err - Error passed into this error that is thrown in the catch + * block, if any. + */ + public constructor(err: Error) { + const message = `There was a problem setting up and running the microphone check. ${err.message}`; + super(message); + this.name = "MicCheckError"; + } +} diff --git a/packages/record/src/mic_check.d.ts b/packages/record/src/mic_check.d.ts new file mode 100644 index 00000000..cef60d7c --- /dev/null +++ b/packages/record/src/mic_check.d.ts @@ -0,0 +1 @@ +declare module "*"; diff --git a/packages/record/src/mic_check.js b/packages/record/src/mic_check.js new file mode 100644 index 00000000..7676d12b --- /dev/null +++ b/packages/record/src/mic_check.js @@ -0,0 +1,71 @@ +const SMOOTHING_FACTOR = 0.99; +const SCALING_FACTOR = 5; +/** + * Audio Worklet Processor class for processing audio input streams. This is + * used by the Recorder to run a volume check on the microphone input stream. + * Source: + * https://www.webrtc-developers.com/how-to-know-if-my-microphone-works/#detect-noise-or-silence + */ +export default class MicCheckProcessor extends AudioWorkletProcessor { + _volume; + _micChecked; + /** Constructor for the mic check processor. */ + constructor() { + super(); + this._volume = 0; + this._micChecked = false; + /** + * Callback to handle a message event on the processor's port. This + * determines how the processor responds when the recorder posts a message + * to the processor with e.g. this.processorNode.port.postMessage({ + * micChecked: true }). + * + * @param event - Message event generated from the 'postMessage' call, which + * includes, among other things, the data property. + * @param event.data - Data sent by the message emitter. + */ + this.port.onmessage = (event) => { + if ( + event.data && + event.data.micChecked && + event.data.micChecked == true + ) { + this._micChecked = true; + } + }; + } + /** + * Process method that implements the audio processing algorithm for the Audio + * Processor Worklet. "Although the method is not a part of the + * AudioWorkletProcessor interface, any implementation of + * AudioWorkletProcessor must provide a process() method." Source: + * https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor/process + * The process method can take the following arguments: inputs, outputs, + * parameters. Here we are only using inputs. + * + * @param inputs - An array of inputs from the audio stream (microphone) + * connnected to the node. Each item in the inputs array is an array of + * channels. Each channel is a Float32Array containing 128 samples. For + * example, inputs[n][m][i] will access n-th input, m-th channel of that + * input, and i-th sample of that channel. + * @returns Boolean indicating whether or not the Audio Worklet Node should + * remain active, even if the User Agent thinks it is safe to shut down. In + * this case, when the recorder decides that the mic check criteria has been + * met, it will return false (processor should be shut down), otherwise it + * will return true (processor should remain active). 
+ */ + process(inputs) { + if (this._micChecked) { + return false; + } else { + const input = inputs[0]; + const samples = input[0]; + const sumSquare = samples.reduce((p, c) => p + c * c, 0); + const rms = Math.sqrt(sumSquare / (samples.length || 1)) * SCALING_FACTOR; + this._volume = Math.max(rms, this._volume * SMOOTHING_FACTOR); + this.port.postMessage({ volume: this._volume }); + return true; + } + } +} +registerProcessor("mic-check-processor", MicCheckProcessor); diff --git a/packages/record/src/mic_check.ts b/packages/record/src/mic_check.ts new file mode 100644 index 00000000..4b813d40 --- /dev/null +++ b/packages/record/src/mic_check.ts @@ -0,0 +1,74 @@ +const SMOOTHING_FACTOR = 0.99; +const SCALING_FACTOR = 5; + +/** + * Audio Worklet Processor class for processing audio input streams. This is + * used by the Recorder to run a volume check on the microphone input stream. + * Source: + * https://www.webrtc-developers.com/how-to-know-if-my-microphone-works/#detect-noise-or-silence + */ +export default class MicCheckProcessor extends AudioWorkletProcessor { + private _volume: number; + private _micChecked: boolean; + /** Constructor for the mic check processor. */ + public constructor() { + super(); + this._volume = 0; + this._micChecked = false; + /** + * Callback to handle a message event on the processor's port. This + * determines how the processor responds when the recorder posts a message + * to the processor with e.g. this.processorNode.port.postMessage({ + * micChecked: true }). + * + * @param event - Message event generated from the 'postMessage' call, which + * includes, among other things, the data property. + * @param event.data - Data sent by the message emitter. + */ + this.port.onmessage = (event: MessageEvent) => { + if ( + event.data && + event.data.micChecked && + event.data.micChecked == true + ) { + this._micChecked = true; + } + }; + } + + /** + * Process method that implements the audio processing algorithm for the Audio + * Processor Worklet. "Although the method is not a part of the + * AudioWorkletProcessor interface, any implementation of + * AudioWorkletProcessor must provide a process() method." Source: + * https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor/process + * The process method can take the following arguments: inputs, outputs, + * parameters. Here we are only using inputs. + * + * @param inputs - An array of inputs from the audio stream (microphone) + * connnected to the node. Each item in the inputs array is an array of + * channels. Each channel is a Float32Array containing 128 samples. For + * example, inputs[n][m][i] will access n-th input, m-th channel of that + * input, and i-th sample of that channel. + * @returns Boolean indicating whether or not the Audio Worklet Node should + * remain active, even if the User Agent thinks it is safe to shut down. In + * this case, when the recorder decides that the mic check criteria has been + * met, it will return false (processor should be shut down), otherwise it + * will return true (processor should remain active). 
+ */ + public process(inputs: Float32Array[][]) { + if (this._micChecked) { + return false; + } else { + const input = inputs[0]; + const samples = input[0]; + const sumSquare = samples.reduce((p, c) => p + c * c, 0); + const rms = Math.sqrt(sumSquare / (samples.length || 1)) * SCALING_FACTOR; + this._volume = Math.max(rms, this._volume * SMOOTHING_FACTOR); + this.port.postMessage({ volume: this._volume }); + return true; + } + } +} + +registerProcessor("mic-check-processor", MicCheckProcessor); diff --git a/packages/record/src/recorder.spec.ts b/packages/record/src/recorder.spec.ts index 64228166..409916cf 100644 --- a/packages/record/src/recorder.spec.ts +++ b/packages/record/src/recorder.spec.ts @@ -1,9 +1,13 @@ import Data from "@lookit/data"; import { initJsPsych } from "jspsych"; import Mustache from "mustache"; -import { NoStopPromiseError, RecorderInitializeError } from "./error"; +import webcamFeed from "../templates/webcam-feed.mustache"; +import { + NoStopPromiseError, + NoStreamError, + RecorderInitializeError, +} from "./error"; import Recorder from "./recorder"; -import webcamFeed from "./templates/webcam-feed.mustache"; import { CSSWidthHeight } from "./types"; jest.mock("@lookit/data"); @@ -234,7 +238,7 @@ test("Recorder destroy", async () => { const jsPsych = initJsPsych(); const rec = new Recorder(jsPsych, "prefix"); - expect(rec.s3).not.toBe(null); + expect(rec["s3"]).not.toBe(null); const media = { stop: jest.fn(), @@ -248,7 +252,7 @@ test("Recorder destroy", async () => { expect(media.stop).toHaveBeenCalledTimes(1); expect(media.stream.getTracks).toHaveBeenCalledTimes(1); - expect(rec.s3).toBe(null); + expect(rec["s3"]).toBe(null); expect(Data.LookitS3.prototype.completeUpload).not.toHaveBeenCalled(); }); @@ -269,7 +273,7 @@ test("Recorder destroy with in-progress upload", async () => { const stopPromise = Promise.resolve(); rec["stopPromise"] = stopPromise; - Object.defineProperty(rec.s3, "uploadInProgress", { + Object.defineProperty(rec["s3"], "uploadInProgress", { /** * Overwrite the getter method for S3's uploadInProgress. 
* @@ -283,7 +287,7 @@ test("Recorder destroy with in-progress upload", async () => { await rec.destroy(); expect(media.stop).toHaveBeenCalledTimes(1); expect(media.stream.getTracks).toHaveBeenCalledTimes(1); - expect(rec.s3).toBe(null); + expect(rec["s3"]).toBe(null); expect(Data.LookitS3.prototype.completeUpload).toHaveBeenCalledTimes(1); }); @@ -311,7 +315,7 @@ test("Recorder destroy with webcam display", async () => { await rec.destroy(); expect(media.stop).toHaveBeenCalledTimes(1); expect(media.stream.getTracks).toHaveBeenCalledTimes(1); - expect(rec.s3).toBe(null); + expect(rec["s3"]).toBe(null); expect(document.body.innerHTML).not.toContain(" { rec.intializeRecorder(stream); expect(jsPsych.pluginAPI.initializeCameraRecorder).toHaveBeenCalled(); - expect(rec.recorder).toBeDefined(); - expect(rec.recorder).not.toBeNull(); - expect(rec.stream).toStrictEqual(stream); + expect(rec["recorder"]).toBeDefined(); + expect(rec["recorder"]).not.toBeNull(); + expect(rec["stream"]).toStrictEqual(stream); +}); + +test("Recorder onMicActivityLevel", () => { + const rec = new Recorder(initJsPsych(), "prefix"); + + type micEventType = { + currentActivityLevel: number; + minVolume: number; + resolve: () => void; + }; + const event_fail = { + currentActivityLevel: 0.0001, + minVolume: rec["minVolume"], + resolve: jest.fn(), + } as micEventType; + + expect(rec.micChecked).toBe(false); + rec["onMicActivityLevel"]( + event_fail.currentActivityLevel, + event_fail.minVolume, + event_fail.resolve, + ); + expect(rec.micChecked).toBe(false); + expect(event_fail.resolve).not.toHaveBeenCalled(); + + const event_pass = { + currentActivityLevel: 0.2, + minVolume: rec["minVolume"], + resolve: jest.fn(), + } as micEventType; + + expect(rec.micChecked).toBe(false); + rec["onMicActivityLevel"]( + event_pass.currentActivityLevel, + event_pass.minVolume, + event_pass.resolve, + ); + expect(rec.micChecked).toBe(true); + expect(event_pass.resolve).toHaveBeenCalled(); +}); + +test("Recorder mic check throws error if no stream", () => { + const jsPsych = initJsPsych(); + const rec = new Recorder(jsPsych, "prefix"); + const media = {}; + jsPsych.pluginAPI.getCameraRecorder = jest.fn().mockReturnValue(media); + expect(async () => { + await rec.checkMic(); + }).rejects.toThrow(NoStreamError); }); test("Recorder download", async () => { diff --git a/packages/record/src/recorder.ts b/packages/record/src/recorder.ts index 1ce3d582..8b6aaffd 100644 --- a/packages/record/src/recorder.ts +++ b/packages/record/src/recorder.ts @@ -3,9 +3,15 @@ import lookitS3 from "@lookit/data/dist/lookitS3"; import autoBind from "auto-bind"; import { JsPsych } from "jspsych"; import Mustache from "mustache"; -import { NoStopPromiseError, RecorderInitializeError } from "./error"; -import webcamFeed from "./templates/webcam-feed.mustache"; +import webcamFeed from "../templates/webcam-feed.mustache"; +import { + MicCheckError, + NoStopPromiseError, + NoStreamError, + RecorderInitializeError, +} from "./error"; import { CSSWidthHeight } from "./types"; +// import MicCheckProcessor from './mic_check'; // TO DO: fix or remove this. See: https://github.com/lookit/lookit-jspsych/issues/44 /** Recorder handles the state of recording and data storage. 
*/ export default class Recorder { @@ -14,11 +20,14 @@ export default class Recorder { process.env.LOCAL_DOWNLOAD?.toLowerCase() === "true"; private filename: string; private stopPromise: Promise | undefined; + private minVolume: number = 0.1; private webcam_element_id = "lookit-jspsych-webcam"; + public micChecked: boolean = false; /** * Use null rather than undefined so that we can set these back to null when * destroying. */ + private processorNode: AudioWorkletNode | null = null; private s3: lookitS3 | null = null; /** * Store the reject function for the stop promise so that we can reject it in @@ -171,6 +180,32 @@ export default class Recorder { )!.srcObject = stream; } + /** + * Perform a sound check on the audio input (microphone). + * + * @param minVol - Minimum mic activity needed to reach the mic check + * threshold (optional). Default is `this.minVolume` + * @returns Promise that resolves when the mic check is complete because the + * audio stream has reached the required minimum level. + */ + public checkMic(minVol: number = this.minVolume) { + if (this.stream) { + const audioContext = new AudioContext(); + const microphone = audioContext.createMediaStreamSource(this.stream); + // This currently loads from lookit-api static files. + // TO DO: load mic_check.js from dist or a URL? See https://github.com/lookit/lookit-jspsych/issues/44 + return audioContext.audioWorklet + .addModule("/static/js/mic_check.js") + .then(() => this.createConnectProcessor(audioContext, microphone)) + .then(() => this.setupPortOnMessage(minVol)) + .catch((err) => { + return Promise.reject(new MicCheckError(err)); + }); + } else { + return Promise.reject(new NoStreamError()); + } + } + /** * Start recording. Also, adds event listeners for handling data and checks * for recorder initialization. @@ -223,12 +258,17 @@ export default class Recorder { * Destroy the recorder. When a plugin/extension destroys the recorder, it * will set the whole Recorder class instance to null, so we don't need to * reset the Recorder instance variables/states. We should complete the S3 - * upload and stop any async processes that might continue to run (stop - * promise). We also need to stop the tracks to release the media devices - * (even if they're not recording). Setting S3 to null should release the - * video blob data from memory. + * upload and stop any async processes that might continue to run (audio + * worklet for the mic check, stop promise). We also need to stop the tracks + * to release the media devices (even if they're not recording). Setting S3 to + * null should release the video blob data from memory. */ public async destroy() { + // Stop the audio worklet processor if it's running + if (this.processorNode !== null) { + this.processorNode.port.postMessage({ micChecked: true }); + this.processorNode = null; + } if (this.stopPromise) { await this.stop(); // Complete any MPU that might've been created @@ -330,6 +370,91 @@ export default class Recorder { return `${prefix}_${new Date().getTime()}.webm`; } + /** + * Private helper to handle the mic level messages that are sent via an + * AudioWorkletProcessor. This checks the current level against the minimum + * threshold, and if the threshold is met, sets the micChecked property to + * true and resolves the checkMic promise. + * + * @param currentActivityLevel - Microphone activity level calculated by the + * processor node. + * @param minVolume - Minimum microphone activity level needed to pass the + * microphone check. 
+ * @param resolve - Resolve callback function for Promise returned by the + * checkMic method. + */ + private onMicActivityLevel( + currentActivityLevel: number, + minVolume: number, + resolve: () => void, + ) { + if (currentActivityLevel > minVolume) { + this.micChecked = true; + this.processorNode?.port.postMessage({ micChecked: true }); + this.processorNode = null; + resolve(); + } + } + + /** + * Private helper that takes the audio context and microphone, creates the + * processor node for the mic check input level processing, and connects the + * microphone to the processor node. + * + * @param audioContext - Audio context that was created in checkMic. This is + * used to create the processor node. + * @param microphone - Microphone audio stream source, created in checkMic. + * The processor node will be connected to this source. + * @returns Promise that resolves after the processor node has been created, + * and the microphone audio stream source is connected to the processor node + * and audio context destination. + */ + private createConnectProcessor( + audioContext: AudioContext, + microphone: MediaStreamAudioSourceNode, + ) { + return new Promise((resolve) => { + this.processorNode = new AudioWorkletNode( + audioContext, + "mic-check-processor", + ); + microphone.connect(this.processorNode).connect(audioContext.destination); + resolve(); + }); + } + + /** + * Private helper to setup the port's on message event handler for the mic + * check processor node. This adds the event related callback, which calls + * onMicActivityLevel with the event data. + * + * @param minVol - Minimum volume level (RMS amplitude) passed from checkMic. + * @returns Promise that resolves from inside the onMicActivityLevel callback, + * when the mic stream input level has reached the threshold. + */ + private setupPortOnMessage(minVol: number) { + return new Promise((resolve) => { + /** + * Callback on the microphone's AudioWorkletNode that fires in response to + * a message event containing the current mic level. When the mic level + * reaches the threshold, this callback sets the micChecked property to + * true and resolves this Promise (via onMicActivityLevel). + * + * @param event - The message event that was sent from the processor on + * the audio worklet node. Contains a 'data' property (object) which + * contains a 'volume' property (number). + */ + this.processorNode!.port.onmessage = (event: MessageEvent) => { + // handle message from the processor: event.data + if (this.onMicActivityLevel) { + if ("data" in event && "volume" in event.data) { + this.onMicActivityLevel(event.data.volume, minVol, resolve); + } + } + }; + }); + } + /** * Check access to webcam/mic stream. * diff --git a/packages/record/src/recorder_WebAudioAPI.spec.ts b/packages/record/src/recorder_WebAudioAPI.spec.ts new file mode 100644 index 00000000..a76d1edd --- /dev/null +++ b/packages/record/src/recorder_WebAudioAPI.spec.ts @@ -0,0 +1,264 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ +import { initJsPsych } from "jspsych"; +import { + audioContextMock, + AudioWorkletNodeMock, +} from "../fixtures/MockWebAudioAPI"; +import { MicCheckError } from "./error"; +import Recorder from "./recorder"; + +// Some of the recorder's methods rely on the WebAudio API, which is not available in Node/Jest/jsdom, so we'll mock it here. +// This is in a separate file to avoid polluting the other test environments with the WebAudio API mocks. 
+ +global.AudioContext = jest.fn(() => audioContextMock) as any; +global.AudioWorkletNode = AudioWorkletNodeMock as any; + +/** Add mock registerProcessor to the global scope. */ +global.registerProcessor = () => {}; + +afterEach(() => { + jest.clearAllMocks(); +}); + +test("Recorder check mic", async () => { + const jsPsych = initJsPsych(); + const rec = new Recorder(jsPsych, "prefix"); + const media = { + stream: { getTracks: jest.fn().mockReturnValue([{ stop: jest.fn() }]) }, + }; + jsPsych.pluginAPI.getCameraRecorder = jest.fn().mockReturnValue(media); + + // Mock the resolution of the last promise in checkMic so that it resolves and we can check the mocks/spies. + rec["setupPortOnMessage"] = jest + .fn() + .mockReturnValue(() => Promise.resolve()); + + const createMediaStreamSourceSpy = jest.spyOn( + audioContextMock, + "createMediaStreamSource", + ); + const addModuleSpy = jest.spyOn(audioContextMock.audioWorklet, "addModule"); + const createConnectProcessorSpy = jest.spyOn(rec, "createConnectProcessor"); + + const expectedAudioContext = new AudioContext(); + const expectedMicrophone = expectedAudioContext.createMediaStreamSource( + rec["stream"], + ); + + await rec.checkMic(); + + expect(createMediaStreamSourceSpy).toHaveBeenCalledWith(media.stream); + expect(addModuleSpy).toHaveBeenCalledWith("/static/js/mic_check.js"); + expect(createConnectProcessorSpy).toHaveBeenCalledWith( + expectedAudioContext, + expectedMicrophone, + ); + expect(rec["setupPortOnMessage"]).toHaveBeenCalledWith(rec["minVolume"]); +}); + +test("Throws MicCheckError with createConnectProcessor error", () => { + const jsPsych = initJsPsych(); + const rec = new Recorder(jsPsych, "prefix"); + const media = { + stream: { getTracks: jest.fn().mockReturnValue([{ stop: jest.fn() }]) }, + }; + jsPsych.pluginAPI.getCameraRecorder = jest.fn().mockReturnValue(media); + + // Mock the resolution of the last promise in checkMic to make sure that the rejection/error occurs before this point. + rec["setupPortOnMessage"] = jest + .fn() + .mockReturnValue(() => Promise.resolve()); + expect(async () => await rec.checkMic()).resolves; + + const mockError = jest.fn(() => { + const promise = new Promise(() => { + throw "Error"; + }); + promise.catch(() => null); // Prevent an uncaught error here so that it propogates to the catch block. + return promise; + }); + rec["createConnectProcessor"] = mockError; + expect(async () => await rec.checkMic()).rejects.toThrow(MicCheckError); +}); + +test("Throws MicCheckError with addModule error", () => { + const jsPsych = initJsPsych(); + const rec = new Recorder(jsPsych, "prefix"); + const media = { + stream: { getTracks: jest.fn().mockReturnValue([{ stop: jest.fn() }]) }, + }; + jsPsych.pluginAPI.getCameraRecorder = jest.fn().mockReturnValue(media); + + // Mock the resolution of the last promise in checkMic to make sure that the rejection/error occurs before this point. + rec["setupPortOnMessage"] = jest + .fn() + .mockReturnValue(() => Promise.resolve()); + expect(async () => await rec.checkMic()).resolves; + + const mockError = jest.fn(() => { + const promise = new Promise(() => { + throw "Error"; + }); + promise.catch(() => null); // Prevent an uncaught error here so that it propogates to the catch block. 
+ return promise; + }); + audioContextMock.audioWorklet.addModule = mockError; + expect(async () => await rec.checkMic()).rejects.toThrow(MicCheckError); +}); + +test("Throws MicCheckError with setupPortOnMessage error", () => { + const jsPsych = initJsPsych(); + const rec = new Recorder(jsPsych, "prefix"); + const media = { + stream: { getTracks: jest.fn().mockReturnValue([{ stop: jest.fn() }]) }, + }; + jsPsych.pluginAPI.getCameraRecorder = jest.fn().mockReturnValue(media); + + // Mock the resolution of the last promise in checkMic to make sure that the rejection/error occurs before this point. + rec["setupPortOnMessage"] = jest + .fn() + .mockReturnValue(() => Promise.resolve()); + expect(async () => await rec.checkMic()).resolves; + + const mockError = jest.fn(() => { + const promise = new Promise(() => { + throw "Error"; + }); + promise.catch(() => null); // Prevent an uncaught error here so that it propogates to the catch block. + return promise; + }); + rec["setupPortOnMessage"] = mockError; + expect(async () => await rec.checkMic()).rejects.toThrow(MicCheckError); +}); + +test("checkMic should process microphone input and handle messages", () => { + const jsPsych = initJsPsych(); + const rec = new Recorder(jsPsych, "prefix"); + const media = { + stream: { getTracks: jest.fn().mockReturnValue([{ stop: jest.fn() }]) }, + }; + jsPsych.pluginAPI.getCameraRecorder = jest.fn().mockReturnValue(media); + + const onMicActivityLevelSpy = jest.spyOn(rec, "onMicActivityLevel" as never); + + expect(rec["processorNode"]).toBe(null); + + // Setup the processor node. + const audioContext = new AudioContext(); + rec["processorNode"] = new AudioWorkletNode( + audioContext, + "mic-check-processor", + ); + expect(rec["processorNode"]).not.toBeNull(); + rec["setupPortOnMessage"](rec["minVolume"]); + expect(rec["processorNode"].port.onmessage).toBeTruthy(); + + expect(rec.micChecked).toBe(false); + + // Simulate a failing event + const failVol = 0.0001; + const mockEventFail = { data: { volume: failVol } } as MessageEvent; + if ( + rec["processorNode"] && + rec["processorNode"].port && + rec["processorNode"].port.onmessage + ) { + rec["processorNode"].port.onmessage(mockEventFail); + } + + // Verify onMicActivityLevel is called with params and micChecked is still false. + expect(onMicActivityLevelSpy).toHaveBeenCalledWith( + failVol, + rec["minVolume"], + expect.any(Function), + ); + expect(rec.micChecked).toBe(false); + + // Simulate a passing event + const passVol = 0.6; + const mockEventPass = { data: { volume: passVol } } as MessageEvent; + if ( + rec["processorNode"] && + rec["processorNode"].port && + rec["processorNode"].port.onmessage + ) { + rec["processorNode"].port.onmessage(mockEventPass); + } + + // Verify onMicActivityLevel is called with params and micChecked is set to true. + expect(onMicActivityLevelSpy).toHaveBeenCalledWith( + passVol, + rec["minVolume"], + expect.any(Function), + ); + expect(rec.micChecked).toBe(true); +}); + +test("Destroy method should set processorNode to null", async () => { + const jsPsych = initJsPsych(); + const rec = new Recorder(jsPsych, "prefix"); + const media = { + stop: jest.fn(), + stream: { getTracks: jest.fn().mockReturnValue([{ stop: jest.fn() }]) }, + }; + jsPsych.pluginAPI.getCameraRecorder = jest.fn().mockReturnValue(media); + + expect(rec["processorNode"]).toBe(null); + + // Setup the processor node. 
+ const audioContext = new AudioContext(); + rec["processorNode"] = new AudioWorkletNode( + audioContext, + "mic-check-processor", + ); + expect(rec["processorNode"]).toBeTruthy(); + rec["setupPortOnMessage"](rec["minVolume"]); + expect(rec["processorNode"].port.onmessage).toBeTruthy(); + + expect(rec["processorNode"]).toBeTruthy(); + await rec.destroy(); + expect(rec["processorNode"]).toBe(null); +}); + +test("Recorder setupPortOnMessage should setup port's on message callback", () => { + const jsPsych = initJsPsych(); + const rec = new Recorder(jsPsych, "prefix"); + const media = { + stop: jest.fn(), + stream: { getTracks: jest.fn().mockReturnValue([{ stop: jest.fn() }]) }, + }; + jsPsych.pluginAPI.getCameraRecorder = jest.fn().mockReturnValue(media); + + expect(rec["processorNode"]).toBe(null); + + // Setup the processor node. + const audioContext = new AudioContext(); + rec["processorNode"] = new AudioWorkletNode( + audioContext, + "mic-check-processor", + ); + expect(rec["processorNode"]).toBeTruthy(); + + rec["onMicActivityLevel"] = jest.fn(); + + rec["setupPortOnMessage"](rec["minVolume"]); + expect(rec["processorNode"].port.onmessage).toBeTruthy(); + + // Simulate a message event to test the message event callback. + const passVol = 0.6; + const mockEventPass = { data: { volume: passVol } } as MessageEvent; + if ( + rec["processorNode"] && + rec["processorNode"].port && + rec["processorNode"].port.onmessage + ) { + rec["processorNode"].port.onmessage(mockEventPass); + } + + // The port message event should trigger onMicActivityLevel. + expect(rec["onMicActivityLevel"]).toHaveBeenCalledWith( + passVol, + rec["minVolume"], + expect.any(Function), + ); +}); diff --git a/packages/record/src/stop.ts b/packages/record/src/stop.ts index ad85f44f..90e97568 100644 --- a/packages/record/src/stop.ts +++ b/packages/record/src/stop.ts @@ -1,9 +1,9 @@ import { LookitWindow } from "@lookit/data/dist/types"; import { JsPsych, JsPsychPlugin } from "jspsych"; import Mustache from "mustache"; +import uploadingVideo from "../templates/uploading-video.mustache"; import { NoSessionRecordingError } from "./error"; import Recorder from "./recorder"; -import uploadingVideo from "./templates/uploading-video.mustache"; declare let window: LookitWindow; diff --git a/packages/record/src/templates/uploading-video.mustache b/packages/record/templates/uploading-video.mustache similarity index 100% rename from packages/record/src/templates/uploading-video.mustache rename to packages/record/templates/uploading-video.mustache diff --git a/packages/record/src/templates/webcam-feed.mustache b/packages/record/templates/webcam-feed.mustache similarity index 100% rename from packages/record/src/templates/webcam-feed.mustache rename to packages/record/templates/webcam-feed.mustache
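Reviewer note (not part of the patch): the sketch below shows how the new mic check added in this diff might be called from experiment code, assuming a `Recorder` whose input stream was already attached via `intializeRecorder` after camera/mic access was granted. The import path and function name `ensureMicWorks` are illustrative only, not part of the package's documented API.

```typescript
import { initJsPsych } from "jspsych";
// Illustrative import path; use the actual export from the @lookit/record package.
import Recorder from "@lookit/record/dist/recorder";

const jsPsych = initJsPsych();
const recorder = new Recorder(jsPsych, "video-prefix");

/** Run the microphone volume check before starting a recording. */
async function ensureMicWorks() {
  try {
    // checkMic() resolves once the worklet-reported RMS volume exceeds the
    // threshold (0.1 by default; an alternative minimum can be passed in).
    // It rejects with NoStreamError when no stream is attached, or with
    // MicCheckError if the audio worklet setup fails.
    await recorder.checkMic();
    console.log("Mic check passed:", recorder.micChecked); // true
  } catch (err) {
    console.error("Mic check could not be completed:", err);
  }
}
```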