diff --git a/js/BirdNet2.4.js b/js/BirdNet2.4.js
index 8fac4cee..cf195e92 100644
--- a/js/BirdNet2.4.js
+++ b/js/BirdNet2.4.js
@@ -71,7 +71,7 @@ onmessage = async (e) => {
             break;
         }
         case "predict": {
-            if (myModel.model_loaded) {
+            if (myModel?.model_loaded) {
                 const { chunks, start, fileStart, file, snr, confidence, worker, context, resetResults } = e.data;
                 myModel.useContext = context;
                 myModel.selection = !resetResults;
diff --git a/js/model.js b/js/model.js
index 693a75ff..c72fc2f8 100644
--- a/js/model.js
+++ b/js/model.js
@@ -82,7 +82,7 @@ onmessage = async (e) => {
             break;
         }
         case "predict": {
-            if (! myModel.model_loaded) { return console.log("worker", worker, "received a prediction request before it was ready") }
+            if (! myModel?.model_loaded) { return console.log("worker", worker, "received a prediction request before it was ready") }
             const { chunks, start, fileStart, file, snr, confidence, context, resetResults } = e.data;
             myModel.useContext = context;
             myModel.selection = !resetResults;
diff --git a/js/ui.js b/js/ui.js
index b1aa25c9..5aeb2e59 100644
--- a/js/ui.js
+++ b/js/ui.js
@@ -1679,7 +1679,7 @@ const defaultConfig = {
     filters: { active: false, highPassFrequency: 0, lowShelfFrequency: 0, lowShelfAttenuation: 0, SNR: 0, sendToModel: false },
     warmup: true,
     hasNode: false,
-    tensorflow: { threads: DIAGNOSTICS['Cores'], batchSize: 32 },
+    tensorflow: { threads: DIAGNOSTICS['Cores'], batchSize: 8 },
     webgpu: { threads: 2, batchSize: 8 },
     webgl: { threads: 2, batchSize: 32 },
     audio: { gain: 0, format: 'mp3', bitrate: 192, quality: 5, downmix: false, padding: false,
@@ -1894,7 +1894,7 @@ const setUpWorkerMessaging = () => {
                 customAnalysisAllMenu(args.result)
                 break;
             }
-            case "analysis-complete": {onAnalysisComplete();
+            case "analysis-complete": {onAnalysisComplete(args);
                 break;
             }
             case "audio-file-to-save": {onSaveAudio(args);
@@ -2761,7 +2761,12 @@ function centreSpec(){
 /////////// Keyboard Shortcuts ////////////
 const GLOBAL_ACTIONS = { // eslint-disable-line
-    a: function (e) { ( e.ctrlKey || e.metaKey) && currentFile && document.getElementById('analyse').click()},
+    a: function (e) {
+        if (( e.ctrlKey || e.metaKey) && currentFile) {
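+            // With Shift held, target the 'analyse all files' button; otherwise analyse the current file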
+            const element = e.shiftKey ? 'analyseAll' : 'analyse';
+            document.getElementById(element).click();
+        }
+    },
     A: function (e) { ( e.ctrlKey || e.metaKey) && currentFile && document.getElementById('analyseAll').click()},
     c: function (e) {
         // Center window on playhead
@@ -3210,10 +3215,11 @@ function centreSpec(){
         }
     }

-    function onAnalysisComplete(){
+    function onAnalysisComplete({quiet}){
         PREDICTING = false;
         STATE.analysisDone = true;
         DOM.progressDiv.classList.add('d-none');
+        if (quiet) return
         // DIAGNOSTICS:
         t1_analysis = Date.now();
         const analysisTime = ((t1_analysis - t0_analysis) / 1000).toFixed(2);
diff --git a/js/worker.js b/js/worker.js
index 8f6da286..fcc019ad 100644
--- a/js/worker.js
+++ b/js/worker.js
@@ -390,6 +390,13 @@ async function handleMessage(e) {
             break;
         }
         case "analyse": {
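+            // An empty worker pool means a previous run was killed by an out-of-memory error
+            // (terminateWorkers() empties it), so refuse to start another analysis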
+            if (!predictWorkers.length) {
+                UI.postMessage({event: 'generate-alert', type: 'warning',
+                    message: `A previous analysis resulted in an out-of-memory error. It is recommended that you reduce the batch size from ${BATCH_SIZE}`
+                })
+                UI.postMessage({event: 'analysis-complete', quiet: true});
+                break;
+            }
             predictionsReceived = {};
             predictionsRequested = {};
             await onAnalyse(args);
@@ -959,21 +966,20 @@ async function onAnalyse({

 function onAbort({
     model = STATE.model,
-    list = 'nocturnal',
+    list = STATE.list,
 }) {
     aborted = true;
     FILE_QUEUE = [];
-    index = 0;
-    DEBUG && console.log("abort received")
-    if (filesBeingProcessed.length) {
-        //restart the workers
-        terminateWorkers();
-        spawnPredictWorkers(model, list, BATCH_SIZE, NUM_WORKERS)
-    }
     predictQueue = [];
     filesBeingProcessed = [];
     predictionsReceived = {};
     predictionsRequested = {};
+    index = 0;
+    DEBUG && console.log("abort received")
+    //restart the workers
+    terminateWorkers();
+    spawnPredictWorkers(model, list, BATCH_SIZE, NUM_WORKERS)
+
 }


 const getDuration = async (src) => {
@@ -1283,7 +1289,7 @@ function setupCtx(audio, rate, destination, file) {
         offlineSource.start();
         return offlineCtx;
     })
-    .catch(error => console.warn(error, file));
+    .catch(error => aborted || console.warn(error, file));
 };

@@ -1401,7 +1407,10 @@ const getWavePredictBuffers = async ({
 function processPredictQueue(audio, file, end, chunkStart){

     if (! audio) [audio, file, end, chunkStart] = predictQueue.shift(); // Dequeue chunk
-    audio.length === 0 && console.warn('Shifted zero length audio from predict queue')
+    if (audio.length === 0) {
+        console.error('Shifted zero length audio from predict queue');
+        return
+    }
     setupCtx(audio, undefined, 'model', file).then(offlineCtx => {
         let worker;
         if (offlineCtx) {
@@ -1412,18 +1421,49 @@ function processPredictQueue(audio, file, end, chunkStart){
                 feedChunksToModel(myArray, chunkStart, file, end, worker);
                 return
             }).catch((error) => {
-                console.error(`PredictBuffer rendering failed: ${error}, file ${file}`);
+                aborted || console.error(`PredictBuffer rendering failed: ${error}, file ${file}`);
                 updateFilesBeingProcessed(file);
                 return
             });
         } else {
+            if (audio.length === 0){
+                if (!aborted){
+                    // If the audio length is 0 now, we must have run out of memory
+                    console.error(`Out of memory. Batch size reduction from ${BATCH_SIZE} recommended`);
+                    aborted = true;
+                    // Hard quit
+                    terminateWorkers();
+                    UI.postMessage({event: 'analysis-complete', quiet: true})
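+                    // 'quiet' makes the UI's onAnalysisComplete() skip its timing diagnostics for this aborted run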
+                    const message = `
+                        System memory exhausted, the operation has been terminated.
+
+                        Tip: Lower the batch size from ${BATCH_SIZE} in the system settings.
+                    `;
+                    UI.postMessage({event: 'generate-alert', type: 'error', message: message})
+                    // Let's do a system notification here.
+                    // Define sysMsg up front so both permission branches can use it:
+                    const sysMsg = `System memory exhausted. Aborting analysis. Tip: reduce batch size from ${BATCH_SIZE}`;
+                    if (Notification.permission === "granted") {
+                        // Check whether notification permissions have already been granted;
+                        // if so, create a notification
+                        const notification = new Notification(sysMsg, {requireInteraction: true, icon: 'img/icon/chirpity_logo2.png'});
+                    } else if (Notification.permission !== "denied") {
+                        // We need to ask the user for permission
+                        Notification.requestPermission().then((permission) => {
+                            // If the user accepts, let's create a notification
+                            if (permission === "granted") {
+                                const notification = new Notification(sysMsg, {requireInteraction: true, icon: 'img/icon/chirpity_logo2.png'});
+                            }
+                        });
+                    }
+                    return
+                }
+            }
+            console.log('Short chunk', audio.length, 'padding');
+            let chunkLength = STATE.model === 'birdnet' ? 144_000 : 72_000;
+            workerInstance = ++workerInstance >= NUM_WORKERS ? 0 : workerInstance;
+            worker = workerInstance;
+            const myArray = new Float32Array(Array.from({ length: chunkLength }).fill(0));
+            feedChunksToModel(myArray, chunkStart, file, end);
+        }}).catch(error => { aborted || console.warn(file, error) })
 }

 const getPredictBuffers = async ({
@@ -2087,11 +2127,10 @@ function spawnPredictWorkers(model, list, batchSize, threads) {
 }

 const terminateWorkers = () => {
-    predictWorkers.forEach(worker => {
-        worker.postMessage({ message: 'abort' })
+    predictWorkers.forEach(worker => {
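+        // terminate() ends the worker thread immediately; the old 'abort' postMessage round-trip is unnecessary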
         worker.terminate()
     })
-    predictWorkers = [];
+    predictWorkers = [];
 }

 async function batchInsertRecords(cname, label, files, originalCname) {
@@ -2181,7 +2220,6 @@ const insertDurations = async (file, id) => {
         .map(entry => `(${entry.toString()},${id})`).join(',');
     // No "OR IGNORE" in this statement because it should only run when the file is new
     const result = await STATE.db.runAsync(`INSERT INTO duration VALUES ${durationSQL}`);
-    console.log('durations added ', result.changes)
 }

 const generateInsertQuery = async (latestResult, file) => {
@@ -2367,11 +2405,7 @@ function updateFilesBeingProcessed(file) {
         if (!STATE.selection) getSummary();
         // Need this here in case last file is not sent for analysis (e.g. nocmig mode)
         UI.postMessage({event: 'analysis-complete'})
-        // // refresh the webgpu backend
-        // if (STATE.detect.backend === 'webgpu' ) {
-        //     terminateWorkers();
-        //     spawnPredictWorkers(STATE.model, STATE.list, BATCH_SIZE, NUM_WORKERS)
-        // }
+
     }
 }

@@ -3705,8 +3739,10 @@ async function convertAndOrganiseFiles(threadLimit) {
     let summaryMessage;
     if (attempted) {
+        let type = 'notice';
         summaryMessage = `Processing complete: ${successfulConversions} successful, ${failedConversions} failed.`;
         if (failedConversions > 0) {
+            type = 'warning';
             summaryMessage += `Failed conversion reasons: