
Commit

bump version
Fix Command-Shift-A shortcut
Reduce default batch size for TensorFlow to 8
Add logic to handle out-of-memory errors
Mattk70 committed Oct 12, 2024
1 parent 1929def commit 1a32229
Showing 6 changed files with 3,535 additions and 35 deletions.
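In outline, the out-of-memory handling this commit adds works as follows (a condensed sketch of the js/worker.js changes shown further down, not new behaviour — BATCH_SIZE, aborted, terminateWorkers, predictWorkers and UI.postMessage are the worker's existing globals):

// Sketch: a zero-length decoded chunk, when we have not already aborted, is taken
// to mean the system has run out of memory rather than that the file was short.
if (audio.length === 0 && !aborted) {
    aborted = true;
    terminateWorkers();                                           // hard stop: drop all predict workers
    UI.postMessage({ event: 'analysis-complete', quiet: true });  // close the progress UI without the usual summary
    UI.postMessage({ event: 'generate-alert', type: 'error',
        message: `Lower the batch size from ${BATCH_SIZE} in the system settings.` });
}
// While predictWorkers remains empty, subsequent "analyse" requests are refused
// with a warning to reduce the batch size (see the first js/worker.js hunk below).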
2 changes: 1 addition & 1 deletion js/BirdNet2.4.js
@@ -71,7 +71,7 @@ onmessage = async (e) => {
             break;
         }
         case "predict": {
-            if (myModel.model_loaded) {
+            if (myModel?.model_loaded) {
                 const { chunks, start, fileStart, file, snr, confidence, worker, context, resetResults } = e.data;
                 myModel.useContext = context;
                 myModel.selection = !resetResults;
2 changes: 1 addition & 1 deletion js/model.js
@@ -82,7 +82,7 @@ onmessage = async (e) => {
             break;
         }
         case "predict": {
-            if (! myModel.model_loaded) { return console.log("worker", worker, "received a prediction request before it was ready") }
+            if (! myModel?.model_loaded) { return console.log("worker", worker, "received a prediction request before it was ready") }
             const { chunks, start, fileStart, file, snr, confidence, context, resetResults } = e.data;
             myModel.useContext = context;
             myModel.selection = !resetResults;
14 changes: 10 additions & 4 deletions js/ui.js
@@ -1679,7 +1679,7 @@ const defaultConfig = {
     filters: { active: false, highPassFrequency: 0, lowShelfFrequency: 0, lowShelfAttenuation: 0, SNR: 0, sendToModel: false },
     warmup: true,
     hasNode: false,
-    tensorflow: { threads: DIAGNOSTICS['Cores'], batchSize: 32 },
+    tensorflow: { threads: DIAGNOSTICS['Cores'], batchSize: 8 },
     webgpu: { threads: 2, batchSize: 8 },
     webgl: { threads: 2, batchSize: 32 },
     audio: { gain: 0, format: 'mp3', bitrate: 192, quality: 5, downmix: false, padding: false,
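For a rough sense of scale behind the new default (back-of-envelope only; assumes the 144,000-sample chunks the worker code below uses for BirdNET — presumably 3 s at 48 kHz — and ignores the model's activation memory, which also grows with batch size):

// Illustrative arithmetic only: raw Float32 input held per batch.
const SAMPLES_PER_CHUNK = 144_000;   // per js/worker.js; 72_000 for the other model
const BYTES_PER_SAMPLE = 4;          // Float32Array
const mb = batchSize => (batchSize * SAMPLES_PER_CHUNK * BYTES_PER_SAMPLE / 1024 / 1024).toFixed(1);
console.log(`batch 32: ${mb(32)} MB, batch 8: ${mb(8)} MB of raw audio input`);
// batch 32: 17.6 MB, batch 8: 4.4 MB — the model's intermediate tensors add considerably more on top.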
@@ -1894,7 +1894,7 @@ const setUpWorkerMessaging = () => {
                 customAnalysisAllMenu(args.result)
                 break;
             }
-            case "analysis-complete": {onAnalysisComplete();
+            case "analysis-complete": {onAnalysisComplete(args);
                 break;
             }
             case "audio-file-to-save": {onSaveAudio(args);
@@ -2761,7 +2761,12 @@ function centreSpec(){
 /////////// Keyboard Shortcuts ////////////
 
 const GLOBAL_ACTIONS = { // eslint-disable-line
-    a: function (e) { ( e.ctrlKey || e.metaKey) && currentFile && document.getElementById('analyse').click()},
+    a: function (e) {
+        if (( e.ctrlKey || e.metaKey) && currentFile) {
+            const element = e.shiftKey ? 'analyseAll' : 'analyse';
+            document.getElementById(element).click();
+        }
+    },
     A: function (e) { ( e.ctrlKey || e.metaKey) && currentFile && document.getElementById('analyseAll').click()},
     c: function (e) {
         // Center window on playhead
@@ -3210,10 +3215,11 @@ function centreSpec(){
         }
     }
 
-function onAnalysisComplete(){
+function onAnalysisComplete({quiet}){
     PREDICTING = false;
     STATE.analysisDone = true;
     DOM.progressDiv.classList.add('d-none');
+    if (quiet) return
     // DIAGNOSTICS:
     t1_analysis = Date.now();
     const analysisTime = ((t1_analysis - t0_analysis) / 1000).toFixed(2);
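The quiet flag above pairs with the 'analysis-complete' messages the worker now posts after an out-of-memory abort (js/worker.js below): the progress UI is cleared, but the usual timing report is skipped. A minimal sketch of the round trip, with the worker messaging plumbing simplified:

// Worker side (sketch): finish quietly after an OOM abort.
UI.postMessage({ event: 'analysis-complete', quiet: true });

// UI side (as in the diff above; the default parameter here is added for illustration only):
// clear state, then skip the diagnostics when quiet.
function onAnalysisComplete({ quiet } = {}) {
    PREDICTING = false;
    STATE.analysisDone = true;
    DOM.progressDiv.classList.add('d-none');
    if (quiet) return;
    // ...timing diagnostics and completion alert...
}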
92 changes: 64 additions & 28 deletions js/worker.js
@@ -390,6 +390,13 @@ async function handleMessage(e) {
             break;
         }
         case "analyse": {
+            if (!predictWorkers.length) {
+                UI.postMessage({event: 'generate-alert', type: 'warning',
+                    message: `A previous analysis resulted in an out-of-memory error, it is recommended you reduce the batch size from ${BATCH_SIZE}`
+                })
+                UI.postMessage({event: 'analysis-complete', quiet: true});
+                break;
+            }
             predictionsReceived = {};
             predictionsRequested = {};
             await onAnalyse(args);
@@ -959,21 +966,20 @@ async function onAnalyse({
 
 function onAbort({
     model = STATE.model,
-    list = 'nocturnal',
+    list = STATE.list,
 }) {
     aborted = true;
     FILE_QUEUE = [];
-    index = 0;
-    DEBUG && console.log("abort received")
-    if (filesBeingProcessed.length) {
-        //restart the workers
-        terminateWorkers();
-        spawnPredictWorkers(model, list, BATCH_SIZE, NUM_WORKERS)
-    }
     predictQueue = [];
     filesBeingProcessed = [];
     predictionsReceived = {};
     predictionsRequested = {};
+    index = 0;
+    DEBUG && console.log("abort received")
+    //restart the workers
+    terminateWorkers();
+    spawnPredictWorkers(model, list, BATCH_SIZE, NUM_WORKERS)
+
 }
 
 const getDuration = async (src) => {
@@ -1283,7 +1289,7 @@ function setupCtx(audio, rate, destination, file) {
             offlineSource.start();
             return offlineCtx;
         })
-        .catch(error => console.warn(error, file));
+        .catch(error => aborted || console.warn(error, file));
 };
 
 
@@ -1401,7 +1407,10 @@ const getWavePredictBuffers = async ({
 function processPredictQueue(audio, file, end, chunkStart){
 
     if (! audio) [audio, file, end, chunkStart] = predictQueue.shift(); // Dequeue chunk
-    audio.length === 0 && console.warn('Shifted zero length audio from predict queue')
+    if (audio.length === 0) {
+        console.error('Shifted zero length audio from predict queue');
+        return
+    }
     setupCtx(audio, undefined, 'model', file).then(offlineCtx => {
         let worker;
         if (offlineCtx) {
@@ -1412,18 +1421,49 @@
                 feedChunksToModel(myArray, chunkStart, file, end, worker);
                 return
             }).catch((error) => {
-                console.error(`PredictBuffer rendering failed: ${error}, file ${file}`);
+                aborted || console.error(`PredictBuffer rendering failed: ${error}, file ${file}`);
                 updateFilesBeingProcessed(file);
                 return
             });
         } else {
-            console.log('Short chunk', audio.length, 'padding');
-            let chunkLength = STATE.model === 'birdnet' ? 144_000 : 72_000;
-            workerInstance = ++workerInstance >= NUM_WORKERS ? 0 : workerInstance;
-            worker = workerInstance;
-            const myArray = new Float32Array(Array.from({ length: chunkLength }).fill(0));
-            feedChunksToModel(myArray, chunkStart, file, end);
-        }}).catch(error => { console.warn(file, error) })
+            if (audio.length === 0){
+                if (!aborted){
+                    // If the audio length is 0 now, we must have run out of memory
+                    console.error(`Out of memory. Batchsize reduction from ${BATCH_SIZE} recommended`);
+                    aborted = true;
+                    // Hard quit
+                    terminateWorkers();
+                    UI.postMessage({event: 'analysis-complete', quiet: true})
+                    const message = `
+                        <p class="text-danger h6">System memory exhausted, the operation has been terminated. </p>
+                        <p>
+                        <b>Tip:</b> Lower the batch size from ${BATCH_SIZE} in the system settings.<p>`;
+                    UI.postMessage({event: 'generate-alert', type: 'error', message: message})
+                    // Let's do a system notification here:
+                    if (Notification.permission === "granted") {
+                        // Check whether notification permissions have already been granted;
+                        // if so, create a notification
+                        const sysMsg = `System memory exhausted. Aborting analysis. Tip: reduce batch size from ${BATCH_SIZE}`;
+                        const notification = new Notification(sysMsg, {requireInteraction: true, icon: 'img/icon/chirpity_logo2.png'});
+                    } else if (Notification.permission !== "denied") {
+                        // We need to ask the user for permission
+                        Notification.requestPermission().then((permission) => {
+                            // If the user accepts, let's create a notification
+                            if (permission === "granted") {
+                                const notification = new Notification(sysMsg, {requireInteraction: true, icon: 'img/icon/chirpity_logo2.png'});
+                            }
+                        });
+                    }
+                    return
+                }
+            }
+            console.log('Short chunk', audio.length, 'padding');
+            let chunkLength = STATE.model === 'birdnet' ? 144_000 : 72_000;
+            workerInstance = ++workerInstance >= NUM_WORKERS ? 0 : workerInstance;
+            worker = workerInstance;
+            const myArray = new Float32Array(Array.from({ length: chunkLength }).fill(0));
+            feedChunksToModel(myArray, chunkStart, file, end);
+        }}).catch(error => { aborted || console.warn(file, error) })
 }
 
 const getPredictBuffers = async ({
@@ -2087,11 +2127,10 @@ function spawnPredictWorkers(model, list, batchSize, threads) {
 }
 
 const terminateWorkers = () => {
     predictWorkers.forEach(worker => {
-        worker.postMessage({ message: 'abort' })
         worker.terminate()
     })
     predictWorkers = [];
 }
 
 async function batchInsertRecords(cname, label, files, originalCname) {
@@ -2181,7 +2220,6 @@ const insertDurations = async (file, id) => {
         .map(entry => `(${entry.toString()},${id})`).join(',');
     // No "OR IGNORE" in this statement because it should only run when the file is new
     const result = await STATE.db.runAsync(`INSERT INTO duration VALUES ${durationSQL}`);
-    console.log('durations added ', result.changes)
 }
 
 const generateInsertQuery = async (latestResult, file) => {
@@ -2367,11 +2405,7 @@ function updateFilesBeingProcessed(file) {
         if (!STATE.selection) getSummary();
         // Need this here in case last file is not sent for analysis (e.g. nocmig mode)
         UI.postMessage({event: 'analysis-complete'})
-        // // refresh the webgpu backend
-        // if (STATE.detect.backend === 'webgpu' ) {
-        //     terminateWorkers();
-        //     spawnPredictWorkers(STATE.model, STATE.list, BATCH_SIZE, NUM_WORKERS)
-        // }
+
     }
 }
 
@@ -3705,8 +3739,10 @@ async function convertAndOrganiseFiles(threadLimit) {
         let summaryMessage;
 
         if (attempted) {
+            let type = 'notice';
             summaryMessage = `Processing complete: ${successfulConversions} successful, ${failedConversions} failed.`;
             if (failedConversions > 0) {
+                type = 'warning';
                 summaryMessage += `<br>Failed conversion reasons:<br><ul>`;
                 failureReasons.forEach(reason => {
                     summaryMessage += `<li>${reason}</li>`;
@@ -3717,7 +3753,7 @@
 
         // Post the summary message
         UI.postMessage({
-            event: `generate-alert`,
+            event: `generate-alert`, type: type,
             message: summaryMessage
         });
     })
2 changes: 1 addition & 1 deletion package.json
@@ -1,6 +1,6 @@
 {
   "name": "chirpity",
-  "version": "2.0.0",
+  "version": "2.0.1",
   "description": "Chirpity Nocmig",
   "main": "main.js",
   "scripts": {
