Improved memory load by limiting backlog - effective this time
Mattk70 committed Oct 13, 2024
1 parent c64f245 commit 48d5a73
Showing 2 changed files with 9 additions and 4 deletions.
js/ui.js (2 changes: 1 addition & 1 deletion)
@@ -1106,7 +1106,7 @@ function postAnalyseMessage(args) {
disableMenuItem(['analyseSelection']);
const selection = !!args.end;
const filesInScope = args.filesInScope;
- updateProgress(0);
+ //updateProgress(0);

if (!selection) {
analyseReset();
refreshResultsView();
js/worker.js (11 changes: 8 additions & 3 deletions)
@@ -1298,7 +1298,7 @@ function checkBacklog(stream) {
const backlog = sumObjectValues(predictionsRequested) - sumObjectValues(predictionsReceived);
DEBUG && console.log('backlog:', backlog);

- if (backlog > 200) {
+ if (backlog >= predictWorkers.length * 4) {
// If the backlog exceeds four chunks per worker, wait and check again
setTimeout(() => {
checkBacklog(stream)
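
This hunk replaces the fixed cap of 200 queued predictions with a limit that scales with the worker pool: at most four outstanding chunks per predict worker. Below is a minimal sketch of the throttle in isolation. The names predictWorkers, predictionsRequested, predictionsReceived and sumObjectValues mirror the diff; the 100 ms retry delay and the onReady callback (standing in for resuming the paused read stream) are assumptions, not the app's actual code.

const predictWorkers = new Array(4);  // assumption: a pool of 4 prediction workers
const predictionsRequested = {};      // per-file request counts, as in worker.js
const predictionsReceived = {};       // per-file result counts

const sumObjectValues = (obj) =>
    Object.values(obj).reduce((total, value) => total + value, 0);

function checkBacklogSketch(onReady) {
    const backlog =
        sumObjectValues(predictionsRequested) - sumObjectValues(predictionsReceived);
    // New policy: at most 4 queued chunks per worker instead of a fixed 200,
    // so the audio buffered in memory scales with the size of the worker pool.
    if (backlog >= predictWorkers.length * 4) {
        setTimeout(() => checkBacklogSketch(onReady), 100); // hypothetical 100 ms poll
    } else {
        onReady(); // backlog has drained enough to feed more chunks
    }
}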
@@ -1411,6 +1411,7 @@ function processPredictQueue(audio, file, end, chunkStart){
console.error('Shifted zero length audio from predict queue');
return
}
+ predictionsRequested[file]++; // do this before any async stuff
setupCtx(audio, undefined, 'model', file).then(offlineCtx => {
let worker;
if (offlineCtx) {
@@ -1421,6 +1422,7 @@ function processPredictQueue(audio, file, end, chunkStart){
feedChunksToModel(myArray, chunkStart, file, end, worker);
return
}).catch((error) => {
+ predictionsRequested[file]--; // Didn't request a prediction after all
aborted || console.error(`PredictBuffer rendering failed: ${error}, file ${file}`);
updateFilesBeingProcessed(file);
return
@@ -1448,7 +1450,10 @@ function processPredictQueue(audio, file, end, chunkStart){
worker = workerInstance;
const myArray = new Float32Array(Array.from({ length: chunkLength }).fill(0));
feedChunksToModel(myArray, chunkStart, file, end);
- }}).catch(error => { aborted || console.warn(file, error) })
+ }}).catch(error => {
+     aborted || console.warn(file, error);
+     predictionsRequested[file]--; // Didn't request a prediction after all
+ })
}

const getPredictBuffers = async ({
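
Taken together, these hunks move the predictionsRequested increment to the top of processPredictQueue, before any await, and pair it with a matching decrement in each catch handler, so a chunk that fails to render no longer leaves the backlog count inflated (which would stall checkBacklog forever). A minimal sketch of the pattern, where renderChunk and queuePrediction are hypothetical stand-ins (renderChunk plays the role of setupCtx() plus offline rendering; feedChunksToModel matches the diff's name but is stubbed here):

const predictionsRequested = {};

// Hypothetical stand-ins for setupCtx() + OfflineAudioContext rendering
// and for the real feedChunksToModel() in worker.js.
const renderChunk = async (audio) => Float32Array.from(audio);
const feedChunksToModel = (samples, file) => console.log(file, samples.length);

function queuePrediction(file, audio) {
    // Count the request first, so checkBacklog() sees in-flight work immediately.
    predictionsRequested[file] = (predictionsRequested[file] || 0) + 1;
    return renderChunk(audio)
        .then((samples) => feedChunksToModel(samples, file))
        .catch((error) => {
            predictionsRequested[file]--; // rendering failed: roll the count back
            console.warn(file, error);
        });
}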
@@ -1667,7 +1672,7 @@ function isDuringDaylight(datetime, lat, lon) {
}

async function feedChunksToModel(channelData, chunkStart, file, end, worker) {
- predictionsRequested[file]++;
+
if (worker === undefined) {
// pick a worker - this method is faster than looking for available workers
worker = ++workerInstance >= NUM_WORKERS ? 0 : workerInstance
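The context lines of this last hunk also show how a worker is chosen when none is passed in: a round-robin over workerInstance. A self-contained sketch of that selection, assuming a pool size of NUM_WORKERS = 4 and making the wrap-around reset explicit with a modulo:

const NUM_WORKERS = 4;   // assumption: pool size
let workerInstance = 0;  // index of the last worker used

function pickWorker() {
    workerInstance = (workerInstance + 1) % NUM_WORKERS; // wrap around the pool
    return workerInstance;
}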
