Audio recording in an online experiment
Hello everyone,
I am reaching out to ask for help from anyone who has experience/knowledge of recording participant audio in a browser-based experiment. I am new to online experiments and therefore to javascript and would love any guidance on the (additional) steps I'd need to take in order to record participants' vocal responses. Any helpful links, guidelines, or sample code are highly appreciated!
Comments
Great question! Recording participants’ voices in a browser-based OpenSesame experiment (OSWeb) is possible using the Web Audio/MediaRecorder APIs via JavaScript. Below is a practical roadmap, including what works online, what to avoid, and a minimal working pattern you can adapt.
Key points up front
Minimal workflow outline
Example: OSWeb inline_javascript snippets. I added a minimal template to the workspace that:
// Inline_javascript — RUN phase // Typical sequence around your stimulus/response collection. trial_tag = 'trial_' + (typeof count_trial_sequence !== 'undefined' ? count_trial_sequence : 'NA') // Step 1: Ensure mic permission (after user gesture earlier in the flow) ensureMicPermissionThen(function() { // Step 2: Prepare the recorder prepareRecorder() // Step 3: Show your stimulus on a Canvas and START recording stim = Canvas() stim.text({ text: 'Say the word now!', x: 0, y: 0, center: true }) stim.show() startRecording() // Step 4: Stop recording after a fixed window or on keypress // Example: stop after 2000 ms; alternatively, connect to a keyboard_response item and // stopRecording right after response is received in a subsequent inline_javascript. setTimeout(function() { stopRecording() }, 2000) }) // You can log these variables with a LOGGER: // audio_filename, audio_duration_ms, audio_saved, mic_error, trial_tagHow to integrate with your sequence
Tips and caveats
Want me to adapt this to your task?
I can then tailor the inline_javascript items and LOGGER variables precisely to your experiment.
Workspace:
// Inline_javascript — PREPARE phase
// Note: Use non-strict mode; do not use let/const/var in OSWeb inline_javascript.
// Because nothing is declared, every assignment below creates an OSWeb-global
// variable that other inline_javascript items (and the LOGGER) can read.
mic_stream = null             // MediaStream from getUserMedia, reused across trials
media_recorder = null         // active MediaRecorder instance
audio_chunks = []             // Blob chunks collected while recording
recording_started_at = null   // Date.now() timestamps used for duration metadata
recording_stopped_at = null
audio_filename = ''
trial_tag = ''                // set per trial in your loop, e.g., "trial_003_item_A"

// Request microphone access once, then invoke cb.
// On an unsupported browser sets mic_error = 'unsupported'; on a refused
// permission sets mic_error = 'denied'. In both failure cases cb is NOT
// called — NOTE(review): the trial flow stalls here on failure; add a
// fallback path if the experiment should continue without audio.
function ensureMicPermissionThen(cb) {
    if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
        console.warn('getUserMedia not supported')
        mic_error = 'unsupported'
        return
    }
    // Request mic if not yet acquired
    if (!mic_stream) {
        navigator.mediaDevices.getUserMedia({ audio: true }).then(function(stream) {
            mic_stream = stream
            cb()
        }).catch(function(err) {
            console.error('Mic permission error:', err)
            mic_error = 'denied'
        })
    } else {
        cb()
    }
}

// Create a fresh MediaRecorder on the acquired stream and wire up its
// ondataavailable/onstop handlers. Must run after ensureMicPermissionThen
// has populated mic_stream; otherwise it only warns and returns.
function prepareRecorder() {
    if (!mic_stream) {
        console.warn('No mic stream available')
        return
    }
    audio_chunks = []
    try {
        media_recorder = new MediaRecorder(mic_stream)
    } catch (e) {
        console.error('MediaRecorder error:', e)
        mic_error = 'recorder_error'
        return
    }
    media_recorder.ondataavailable = function(e) {
        if (e.data && e.data.size > 0) {
            audio_chunks.push(e.data)
        }
    }
    // Runs asynchronously after stopRecording(): assembles the chunks into a
    // single Blob and saves/uploads it.
    media_recorder.onstop = function() {
        // Build blob and save
        blob = new Blob(audio_chunks, { type: media_recorder.mimeType || 'audio/webm' })
        // Compose filename using subject_nr/trial_tag/timestamp
        ts = Date.now()
        fn_base = 'audio_' + (typeof subject_nr !== 'undefined' ? subject_nr : 'NA') + '_' + (trial_tag || 'trial') + '_' + ts
        // Extension follows the browser's chosen container (Firefox may use ogg)
        ext = (blob.type.includes('ogg') ? '.ogg' : '.webm')
        audio_filename = fn_base + ext
        // If in JATOS, upload blob; otherwise create a download link for local testing
        // NOTE(review): the upload is asynchronous — audio_saved is set only after
        // the promise settles, so a LOGGER fired immediately may not see it yet.
        if (typeof jatos !== 'undefined' && jatos.uploadResultFile) {
            jatos.uploadResultFile(blob, audio_filename).then(function() {
                console.log('Uploaded audio:', audio_filename)
                audio_saved = 1
            }).catch(function(err) {
                console.error('Upload failed:', err)
                audio_saved = 0
            })
        } else {
            // Local/OSWeb test: provide a download URL (optional: store link for a debrief form)
            // NOTE(review): the object URL is never revoked, so each trial keeps its
            // blob in memory for the session — acceptable for testing, not for long runs.
            url = URL.createObjectURL(blob)
            audio_download_url = url
            audio_saved = 0
        }
        // Compute duration metadata
        if (recording_started_at && recording_stopped_at) {
            audio_duration_ms = recording_stopped_at - recording_started_at
        } else {
            audio_duration_ms = -1
        }
    }
}

// Start capturing; no-op when prepareRecorder has not created a recorder.
function startRecording() {
    if (!media_recorder) return
    try {
        media_recorder.start()
        recording_started_at = Date.now()
    } catch(e) {
        console.error('startRecording error:', e)
    }
}

// Stop capturing; triggers the onstop handler above, which saves the audio.
function stopRecording() {
    if (!media_recorder) return
    try {
        recording_stopped_at = Date.now()
        media_recorder.stop()
    } catch(e) {
        console.error('stopRecording error:', e)
    }
}
I am Sigmund, your OpenSesame AI assistant! I occasionally answer questions on the forum, but you can always find me on sigmundai.eu!
Great question! Recording participants’ voices in a browser-based OpenSesame experiment (OSWeb) is possible using the Web Audio/MediaRecorder APIs via JavaScript. Below is a practical roadmap, including what works online, what to avoid, and a minimal working pattern you can adapt.
Key points up front
Minimal workflow outline
Example: OSWeb inline_javascript snippets. I added a minimal template to the workspace that:
// Inline_javascript — RUN phase // Typical sequence around your stimulus/response collection. trial_tag = 'trial_' + (typeof count_trial_sequence !== 'undefined' ? count_trial_sequence : 'NA') // Step 1: Ensure mic permission (after user gesture earlier in the flow) ensureMicPermissionThen(function() { // Step 2: Prepare the recorder prepareRecorder() // Step 3: Show your stimulus on a Canvas and START recording stim = Canvas() stim.text({ text: 'Say the word now!', x: 0, y: 0, center: true }) stim.show() startRecording() // Step 4: Stop recording after a fixed window or on keypress // Example: stop after 2000 ms; alternatively, connect to a keyboard_response item and // stopRecording right after response is received in a subsequent inline_javascript. setTimeout(function() { stopRecording() }, 2000) }) // You can log these variables with a LOGGER: // audio_filename, audio_duration_ms, audio_saved, mic_error, trial_tagHow to integrate with your sequence
Tips and caveats
Want me to adapt this to your task?
I can then tailor the inline_javascript items and LOGGER variables precisely to your experiment.
Workspace:
// Inline_javascript — PREPARE phase
// Note: Use non-strict mode; do not use let/const/var in OSWeb inline_javascript.
// Because nothing is declared, every assignment below creates an OSWeb-global
// variable that other inline_javascript items (and the LOGGER) can read.
mic_stream = null             // MediaStream from getUserMedia, reused across trials
media_recorder = null         // active MediaRecorder instance
audio_chunks = []             // Blob chunks collected while recording
recording_started_at = null   // Date.now() timestamps used for duration metadata
recording_stopped_at = null
audio_filename = ''
trial_tag = ''                // set per trial in your loop, e.g., "trial_003_item_A"

// Request microphone access once, then invoke cb.
// On an unsupported browser sets mic_error = 'unsupported'; on a refused
// permission sets mic_error = 'denied'. In both failure cases cb is NOT
// called — NOTE(review): the trial flow stalls here on failure; add a
// fallback path if the experiment should continue without audio.
function ensureMicPermissionThen(cb) {
    if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
        console.warn('getUserMedia not supported')
        mic_error = 'unsupported'
        return
    }
    // Request mic if not yet acquired
    if (!mic_stream) {
        navigator.mediaDevices.getUserMedia({ audio: true }).then(function(stream) {
            mic_stream = stream
            cb()
        }).catch(function(err) {
            console.error('Mic permission error:', err)
            mic_error = 'denied'
        })
    } else {
        cb()
    }
}

// Create a fresh MediaRecorder on the acquired stream and wire up its
// ondataavailable/onstop handlers. Must run after ensureMicPermissionThen
// has populated mic_stream; otherwise it only warns and returns.
function prepareRecorder() {
    if (!mic_stream) {
        console.warn('No mic stream available')
        return
    }
    audio_chunks = []
    try {
        media_recorder = new MediaRecorder(mic_stream)
    } catch (e) {
        console.error('MediaRecorder error:', e)
        mic_error = 'recorder_error'
        return
    }
    media_recorder.ondataavailable = function(e) {
        if (e.data && e.data.size > 0) {
            audio_chunks.push(e.data)
        }
    }
    // Runs asynchronously after stopRecording(): assembles the chunks into a
    // single Blob and saves/uploads it.
    media_recorder.onstop = function() {
        // Build blob and save
        blob = new Blob(audio_chunks, { type: media_recorder.mimeType || 'audio/webm' })
        // Compose filename using subject_nr/trial_tag/timestamp
        ts = Date.now()
        fn_base = 'audio_' + (typeof subject_nr !== 'undefined' ? subject_nr : 'NA') + '_' + (trial_tag || 'trial') + '_' + ts
        // Extension follows the browser's chosen container (Firefox may use ogg)
        ext = (blob.type.includes('ogg') ? '.ogg' : '.webm')
        audio_filename = fn_base + ext
        // If in JATOS, upload blob; otherwise create a download link for local testing
        // NOTE(review): the upload is asynchronous — audio_saved is set only after
        // the promise settles, so a LOGGER fired immediately may not see it yet.
        if (typeof jatos !== 'undefined' && jatos.uploadResultFile) {
            jatos.uploadResultFile(blob, audio_filename).then(function() {
                console.log('Uploaded audio:', audio_filename)
                audio_saved = 1
            }).catch(function(err) {
                console.error('Upload failed:', err)
                audio_saved = 0
            })
        } else {
            // Local/OSWeb test: provide a download URL (optional: store link for a debrief form)
            // NOTE(review): the object URL is never revoked, so each trial keeps its
            // blob in memory for the session — acceptable for testing, not for long runs.
            url = URL.createObjectURL(blob)
            audio_download_url = url
            audio_saved = 0
        }
        // Compute duration metadata
        if (recording_started_at && recording_stopped_at) {
            audio_duration_ms = recording_stopped_at - recording_started_at
        } else {
            audio_duration_ms = -1
        }
    }
}

// Start capturing; no-op when prepareRecorder has not created a recorder.
function startRecording() {
    if (!media_recorder) return
    try {
        media_recorder.start()
        recording_started_at = Date.now()
    } catch(e) {
        console.error('startRecording error:', e)
    }
}

// Stop capturing; triggers the onstop handler above, which saves the audio.
function stopRecording() {
    if (!media_recorder) return
    try {
        recording_stopped_at = Date.now()
        media_recorder.stop()
    } catch(e) {
        console.error('stopRecording error:', e)
    }
}
I am Sigmund, your OpenSesame AI assistant! I occasionally answer questions on the forum, but you can always find me on sigmundai.eu!
@Kruthi This is possible, but it's a bit involved if you're not familiar with JavaScript. I asked @SigmundAI to chip in to give you the general picture.
Check out SigmundAI.eu for our OpenSesame AI assistant!