diff --git a/.analyze.js.swp b/.analyze.js.swp
deleted file mode 100644
index de9614f..0000000
Binary files a/.analyze.js.swp and /dev/null differ
diff --git a/.app.js.swp b/.app.js.swp
deleted file mode 100644
index 8df8837..0000000
Binary files a/.app.js.swp and /dev/null differ
diff --git a/analyze.js b/analyze.js
index 4ab497f..cc78858 100644
--- a/analyze.js
+++ b/analyze.js
@@ -1,117 +1,117 @@
 function startPitchDetection(callback) {
-  // Check for browser support
-  if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
-    console.error("getUserMedia is not supported in this browser.");
-    return;
-  }
+    // Check for browser support
+    if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
+        console.error("getUserMedia is not supported in this browser.");
+        return;
+    }
 
-  // Request access to the microphone
-  //navigator.mediaDevices.getUserMedia({ audio: true })
+    // Request access to the microphone
+    //navigator.mediaDevices.getUserMedia({ audio: true })
 navigator.mediaDevices.getUserMedia({
-  audio: {
-    echoCancellation: false,
-    noiseSuppression: false,
-    autoGainControl: false
-  }
-})
-  .then(stream => {
-    // Create an audio context and an analyser node
-    const audioContext = new (window.AudioContext || window.webkitAudioContext)();
-    const analyser = audioContext.createAnalyser();
-    analyser.fftSize = 2048; // Set FFT size; higher values offer more precision but more latency
+    audio: {
+        echoCancellation: false,
+        noiseSuppression: false,
+        autoGainControl: false
+    }
+    })
+    .then(stream => {
+        // Create an audio context and an analyser node
+        const audioContext = new (window.AudioContext || window.webkitAudioContext)();
+        const analyser = audioContext.createAnalyser();
+        analyser.fftSize = 2048; // Set FFT size; higher values offer more precision but more latency
 
-    // Connect the microphone stream to the analyser node
-    const source = audioContext.createMediaStreamSource(stream);
-    source.connect(analyser);
+        // Connect the microphone stream to the analyser node
+        const source = audioContext.createMediaStreamSource(stream);
+        source.connect(analyser);
 
-    // Create a buffer to hold the time domain data
-    const bufferLength = analyser.fftSize;
-    const buffer = new Float32Array(bufferLength);
+        // Create a buffer to hold the time domain data
+        const bufferLength = analyser.fftSize;
+        const buffer = new Float32Array(bufferLength);
 
-    /**
-     * Autocorrelation algorithm to estimate pitch from the audio buffer.
-     * Returns the estimated pitch in Hz, or -1 if no pitch is detected.
-     */
-    function autoCorrelate(buf, sampleRate) {
-      const SIZE = buf.length;
-      let rms = 0;
+        /**
+         * Autocorrelation algorithm to estimate pitch from the audio buffer.
+         * Returns the estimated pitch in Hz, or -1 if no pitch is detected.
+         */
+        function autoCorrelate(buf, sampleRate) {
+            const SIZE = buf.length;
+            let rms = 0;
 
-      // Compute Root Mean Square (RMS) to check if there's enough signal
-      for (let i = 0; i < SIZE; i++) {
-        const val = buf[i];
-        rms += val * val;
-      }
-      rms = Math.sqrt(rms / SIZE);
-      if (rms < 0.01) // Signal too weak – likely silence
-        return -1;
+            // Compute Root Mean Square (RMS) to check if there's enough signal
+            for (let i = 0; i < SIZE; i++) {
+                const val = buf[i];
+                rms += val * val;
+            }
+            rms = Math.sqrt(rms / SIZE);
+            if (rms < 0.01) // Signal too weak – likely silence
+                return -1;
 
-      // Trim the buffer to remove noise at the beginning and end
-      let r1 = 0, r2 = SIZE - 1;
-      for (let i = 0; i < SIZE; i++) {
-        if (Math.abs(buf[i]) < 0.2) { r1 = i; break; }
-      }
-      for (let i = 1; i < SIZE; i++) {
-        if (Math.abs(buf[SIZE - i]) < 0.2) { r2 = SIZE - i; break; }
-      }
-      const trimmedBuffer = buf.slice(r1, r2);
-      const trimmedSize = trimmedBuffer.length;
+            // Trim the buffer to remove noise at the beginning and end
+            let r1 = 0, r2 = SIZE - 1;
+            for (let i = 0; i < SIZE; i++) {
+                if (Math.abs(buf[i]) < 0.2) { r1 = i; break; }
+            }
+            for (let i = 1; i < SIZE; i++) {
+                if (Math.abs(buf[SIZE - i]) < 0.2) { r2 = SIZE - i; break; }
+            }
+            const trimmedBuffer = buf.slice(r1, r2);
+            const trimmedSize = trimmedBuffer.length;
 
-      // Calculate the autocorrelation of the trimmed buffer
-      const correlations = new Array(trimmedSize).fill(0);
-      for (let lag = 0; lag < trimmedSize; lag++) {
-        for (let i = 0; i < trimmedSize - lag; i++) {
-          correlations[lag] += trimmedBuffer[i] * trimmedBuffer[i + lag];
-        }
-      }
+            // Calculate the autocorrelation of the trimmed buffer
+            const correlations = new Array(trimmedSize).fill(0);
+            for (let lag = 0; lag < trimmedSize; lag++) {
+                for (let i = 0; i < trimmedSize - lag; i++) {
+                    correlations[lag] += trimmedBuffer[i] * trimmedBuffer[i + lag];
+                }
+            }
 
-      // Find the first dip in the autocorrelation – skip lags before this point
-      let d = 0;
-      while (d < correlations.length - 1 && correlations[d] > correlations[d + 1]) {
-        d++;
-      }
+            // Find the first dip in the autocorrelation – skip lags before this point
+            let d = 0;
+            while (d < correlations.length - 1 && correlations[d] > correlations[d + 1]) {
+                d++;
+            }
 
-      // Search for the peak correlation after the dip
-      let maxVal = -1, maxPos = -1;
-      for (let i = d; i < correlations.length; i++) {
-        if (correlations[i] > maxVal) {
-          maxVal = correlations[i];
-          maxPos = i;
-        }
-      }
+            // Search for the peak correlation after the dip
+            let maxVal = -1, maxPos = -1;
+            for (let i = d; i < correlations.length; i++) {
+                if (correlations[i] > maxVal) {
+                    maxVal = correlations[i];
+                    maxPos = i;
+                }
+            }
 
-      // Parabolic interpolation for a more accurate peak estimate
-      let T0 = maxPos;
-      if (T0 > 0 && T0 < correlations.length - 1) {
-        const x1 = correlations[T0 - 1];
-        const x2 = correlations[T0];
-        const x3 = correlations[T0 + 1];
-        const a = (x1 + x3 - 2 * x2) / 2;
-        const b = (x3 - x1) / 2;
-        if (a !== 0) {
-          T0 = T0 - b / (2 * a);
-        }
-      }
+            // Parabolic interpolation for a more accurate peak estimate
+            let T0 = maxPos;
+            if (T0 > 0 && T0 < correlations.length - 1) {
+                const x1 = correlations[T0 - 1];
+                const x2 = correlations[T0];
+                const x3 = correlations[T0 + 1];
+                const a = (x1 + x3 - 2 * x2) / 2;
+                const b = (x3 - x1) / 2;
+                if (a !== 0) {
+                    T0 = T0 - b / (2 * a);
+                }
+            }
 
-      // Convert lag to frequency
-      const pitch = sampleRate / T0;
-      return pitch;
-    }
+            // Convert lag to frequency
+            const pitch = sampleRate / T0;
+            return pitch;
+        }
 
-    // Continuously update and detect pitch
-    function updatePitch() {
-      // Get the latest time-domain data
-      analyser.getFloatTimeDomainData(buffer);
-      // Estimate pitch using our autocorrelation function
-      const pitch = autoCorrelate(buffer, audioContext.sampleRate);
-      // Pass the detected pitch (in Hz) to the provided callback
-      callback(pitch);
-      // Continue the update loop
-      requestAnimationFrame(updatePitch);
-    }
+        // Continuously update and detect pitch
+        function updatePitch() {
+            // Get the latest time-domain data
+            analyser.getFloatTimeDomainData(buffer);
+            // Estimate pitch using our autocorrelation function
+            const pitch = autoCorrelate(buffer, audioContext.sampleRate);
+            // Pass the detected pitch (in Hz) to the provided callback
+            callback(pitch);
+            // Continue the update loop
+            requestAnimationFrame(updatePitch);
+        }
 
-    updatePitch();
-  })
-  .catch(err => {
-    console.error("Error accessing the microphone: ", err);
-  });
+        updatePitch();
+    })
+    .catch(err => {
+        console.error("Error accessing the microphone: ", err);
+    });
 }
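
Note (not part of the diff above): the last step of autoCorrelate relies on the fact that a periodic signal's autocorrelation peaks at a lag equal to its period in samples, which is why it returns sampleRate / T0. A minimal sketch of that relationship, assuming a 44.1 kHz sample rate (an assumption; real AudioContexts commonly run at 44100 or 48000 Hz depending on the device):

// Sanity check of the lag-to-frequency conversion used in autoCorrelate
// (pitch = sampleRate / T0). The sample rate here is assumed, not queried.
const sampleRate = 44100;
const periodInSamples = freq => sampleRate / freq;
console.log(periodInSamples(440));    // ≈ 100.2 samples for A4
console.log(periodInSamples(82.41));  // ≈ 535.1 samples for a guitar's low E (E2)
// With fftSize = 2048 the analysis window covers 2048 / 44100 ≈ 46 ms,
// so lags longer than the window (pitches below roughly 21.5 Hz) cannot be
// measured at all, and pitches near that limit become unreliable.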
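
Also for reference, a minimal usage sketch of the startPitchDetection(callback) API defined in analyze.js: the callback receives a frequency in Hz, or -1 when the signal is too weak. This is illustrative and not part of the commit; describePitch, NOTE_NAMES, and the #note element are hypothetical names.

// Hypothetical consumer of startPitchDetection (not in the repo):
// map the detected frequency to the nearest equal-tempered note name.
const NOTE_NAMES = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"];

function describePitch(frequencyHz) {
  if (frequencyHz <= 0) return "no pitch";              // autoCorrelate reports -1 for silence
  const midi = Math.round(12 * Math.log2(frequencyHz / 440) + 69); // A4 = 440 Hz = MIDI 69
  const name = NOTE_NAMES[((midi % 12) + 12) % 12] + (Math.floor(midi / 12) - 1);
  const ideal = 440 * Math.pow(2, (midi - 69) / 12);    // frequency of the nearest note
  const cents = Math.round(1200 * Math.log2(frequencyHz / ideal));
  return `${name} (${frequencyHz.toFixed(1)} Hz, ${cents >= 0 ? "+" : ""}${cents} cents)`;
}

// Assumes an element like <span id="note"></span> exists in the page.
startPitchDetection(pitch => {
  document.getElementById("note").textContent = describePitch(pitch);
});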