diff --git a/face_recognition/facenet_nchw.js b/face_recognition/facenet_nchw.js
index ed033321..2b0a9057 100644
--- a/face_recognition/facenet_nchw.js
+++ b/face_recognition/facenet_nchw.js
@@ -142,9 +142,9 @@ export class FaceNetNchw {
     this.context_ = await navigator.ml.createContext(contextOptions);
     this.builder_ = new MLGraphBuilder(this.context_);
     const inputDesc = {
-      dataType: 'float32',
-      dimensions: this.inputOptions.inputShape,
-      shape: this.inputOptions.inputShape,
+      dataType: 'float32',
+      dimensions: this.inputOptions.inputShape,
+      shape: this.inputOptions.inputShape,
     };
     const input = this.builder_.input('input', inputDesc);
     inputDesc.usage = MLTensorUsage.WRITE;
diff --git a/nnotepad/.eslintrc.js b/nnotepad/.eslintrc.js
index 329c64ed..23c8ee1f 100644
--- a/nnotepad/.eslintrc.js
+++ b/nnotepad/.eslintrc.js
@@ -1,4 +1,5 @@
 module.exports = {
   env: {'es6': true, 'browser': true, 'jquery': false, 'node': true},
   parserOptions: {ecmaVersion: 2021, sourceType: 'module'},
+  globals: {'MLTensorUsage': 'readonly'},
 };
diff --git a/nnotepad/js/nnotepad.js b/nnotepad/js/nnotepad.js
index adf9d2cd..ee7ed7da 100644
--- a/nnotepad/js/nnotepad.js
+++ b/nnotepad/js/nnotepad.js
@@ -597,11 +597,11 @@ export class NNotepad {
       throw new DispatchError(`${ex.name} : ${ex.message}`);
     }
 
-    for (const name in outputBuffers) {
+    for (const [name, outputBuffer] of Object.entries(outputBuffers)) {
       const buffer = await context.readTensor(outputTensors[name]);
-      const instance = new outputBuffers[name].constructor(buffer);
-      outputBuffers[name].set(instance);
-    };
+      const instance = new outputBuffer.constructor(buffer);
+      outputBuffer.set(instance);
+    }
 
     function maybeProxyForFloat16Array(array) {
       return ('proxyForFloat16Array' in self) ?
diff --git a/nsnet2/denoiser.js b/nsnet2/denoiser.js
index b70468ab..5fe943eb 100644
--- a/nsnet2/denoiser.js
+++ b/nsnet2/denoiser.js
@@ -1,6 +1,6 @@
 import {NSNet2} from './nsnet2.js';
 import * as featurelib from './featurelib.js';
-import {sizeOfShape, getUrlParams, weightsOrigin} from '../common/utils.js';
+import {getUrlParams, weightsOrigin} from '../common/utils.js';
 
 export class Denoiser {
   constructor(batchSize, frames, sampleRate) {
diff --git a/nsnet2/nsnet2.js b/nsnet2/nsnet2.js
index 6bae92d8..a7cb1445 100644
--- a/nsnet2/nsnet2.js
+++ b/nsnet2/nsnet2.js
@@ -61,7 +61,7 @@ export class NSNet2 {
     squeeze95Shape.splice(1, 1);
     const squeeze95 = this.builder_.reshape(gru93, squeeze95Shape);
     const initialState155 = this.builder_.input('initialState155', initialStateDesc);
-
+
     initialStateDesc.usage = MLTensorUsage.WRITE;
     this.initialState92Tensor_ = await this.context_.createTensor(initialStateDesc);
     this.initialState155Tensor_ = await this.context_.createTensor(initialStateDesc);
diff --git a/object_detection/main.js b/object_detection/main.js
index 1d6c0cc4..4ab1b424 100644
--- a/object_detection/main.js
+++ b/object_detection/main.js
@@ -303,7 +303,7 @@ async function main() {
     let medianComputeTime;
 
     // Do warm up
-    let results = await netInstance.compute(inputBuffer);
+    const results = await netInstance.compute(inputBuffer);
 
     for (let i = 0; i < numRuns; i++) {
       start = performance.now();
diff --git a/object_detection/ssd_mobilenetv1_nchw.js b/object_detection/ssd_mobilenetv1_nchw.js
index d8b83c23..1b1760ee 100644
--- a/object_detection/ssd_mobilenetv1_nchw.js
+++ b/object_detection/ssd_mobilenetv1_nchw.js
@@ -104,7 +104,7 @@ ${nameArray[1]}_BatchNorm_batchnorm`;
       shape: this.scoresShape_,
       usage: MLTensorUsage.READ,
     });
-
+
     if (this.targetDataType_ === 'float16') {
       input = this.builder_.cast(input, 'float16');
     }
@@ -299,8 +299,10 @@ ${nameArray[1]}_BatchNorm_batchnorm`;
     };
     this.context_.dispatch(this.graph_, inputs, outputs);
     const results = {
-      'boxes': new Float32Array(await this.context_.readTensor(this.boxesTensor_)),
-      'scores': new Float32Array(await this.context_.readTensor(this.scoresTensor_)),
+      boxes: new Float32Array(
+          await this.context_.readTensor(this.boxesTensor_)),
+      scores: new Float32Array(
+          await this.context_.readTensor(this.scoresTensor_)),
     };
     return results;
   }
diff --git a/object_detection/ssd_mobilenetv1_nhwc.js b/object_detection/ssd_mobilenetv1_nhwc.js
index a0a9ac1a..ea4a81b2 100644
--- a/object_detection/ssd_mobilenetv1_nhwc.js
+++ b/object_detection/ssd_mobilenetv1_nhwc.js
@@ -276,8 +276,10 @@ ${nameArray[1]}_BatchNorm_batchnorm`;
     };
     this.context_.dispatch(this.graph_, inputs, outputs);
     const results = {
-      'boxes': new Float32Array(await this.context_.readTensor(this.boxesTensor_)),
-      'scores': new Float32Array(await this.context_.readTensor(this.scoresTensor_)),
+      'boxes': new Float32Array(
+          await this.context_.readTensor(this.boxesTensor_)),
+      'scores': new Float32Array(
+          await this.context_.readTensor(this.scoresTensor_)),
     };
     return results;
   }
diff --git a/rnnoise/rnnoise.js b/rnnoise/rnnoise.js
index 86ed6cf5..6d79271d 100644
--- a/rnnoise/rnnoise.js
+++ b/rnnoise/rnnoise.js
@@ -80,15 +80,17 @@ export class RNNoise {
         vadGruBData, [0, 3 * this.vadGruHiddenSize],
         [1, 3 * this.vadGruHiddenSize]);
-
+
     const vadGruInitialHDesc = {
       dataType: 'float32',
       dimensions: [1, this.batchSize_, this.vadGruHiddenSize],
       shape: [1, this.batchSize_, this.vadGruHiddenSize],
     };
-    const vadGruInitialH = this.builder_.input('vadGruInitialH', vadGruInitialHDesc);
+    const vadGruInitialH = this.builder_.input(
+        'vadGruInitialH', vadGruInitialHDesc);
     vadGruInitialHDesc.usage = MLTensorUsage.WRITE;
-    this.vadGruInitialHTensor_ = await this.context_.createTensor(vadGruInitialHDesc);
+    this.vadGruInitialHTensor_ = await this.context_.createTensor(
+        vadGruInitialHDesc);
     const [vadGruYH, vadGruY] = this.builder_.gru(vadGruX, vadGruW,
         vadGruR, this.frames_, this.vadGruHiddenSize, {
@@ -119,9 +121,11 @@ export class RNNoise {
       dimensions: [1, this.batchSize_, this.noiseGruHiddenSize],
       shape: [1, this.batchSize_, this.noiseGruHiddenSize],
     };
-    const noiseGruInitialH = this.builder_.input('noiseGruInitialH', noiseGruInitialHDesc);
+    const noiseGruInitialH = this.builder_.input(
+        'noiseGruInitialH', noiseGruInitialHDesc);
     noiseGruInitialHDesc.usage = MLTensorUsage.WRITE;
-    this.noiseGruInitialHTensor_ = await this.context_.createTensor(noiseGruInitialHDesc);
+    this.noiseGruInitialHTensor_ = await this.context_.createTensor(
+        noiseGruInitialHDesc);
     const [noiseGruYH, noiseGruY] = this.builder_.gru(noiseGruX, noiseGruW,
         noiseGruR, this.frames_, this.noiseGruHiddenSize, {
@@ -146,15 +150,17 @@ export class RNNoise {
         denoiseGruBData, [0, 3 * this.denoiseGruHiddenSize],
         [1, 3 * this.denoiseGruHiddenSize]);
-
+
     const denoiseGruInitialHDesc = {
       dataType: 'float32',
       dimensions: [1, this.batchSize_, this.denoiseGruHiddenSize],
       shape: [1, this.batchSize_, this.denoiseGruHiddenSize],
     };
-    const denoiseGruInitialH = this.builder_.input('denoiseGruInitialH', denoiseGruInitialHDesc);
+    const denoiseGruInitialH = this.builder_.input(
+        'denoiseGruInitialH', denoiseGruInitialHDesc);
     denoiseGruInitialHDesc.usage = MLTensorUsage.WRITE;
-    this.denoiseGruInitialHTensor_ = await this.context_.createTensor(denoiseGruInitialHDesc);
+    this.denoiseGruInitialHTensor_ = await this.context_.createTensor(
+        denoiseGruInitialHDesc);
     const [denoiseGruYH, denoiseGruY] = this.builder_.gru(denoiseGruX,
         denoiseGruW, denoiseGruR, this.frames_, this.denoiseGruHiddenSize, {
@@ -175,28 +181,39 @@ export class RNNoise {
         denoiseOutput0, denoiseOutputBias0);
     const denoiseOutput = this.builder_.sigmoid(biasedTensorName);
 
+    const denoiseOutputShape =
+        [this.batchSize_, this.frames_, this.gainsSize_];
     this.denoiseOutputTensor_ = await this.context_.createTensor({
       dataType: 'float32',
-      dimensions: [this.batchSize_, this.frames_, this.gainsSize_],
-      shape: [this.batchSize_, this.frames_, this.gainsSize_],
+      dimensions: denoiseOutputShape,
+      shape: denoiseOutputShape,
       usage: MLTensorUsage.READ,
     });
+    const vadGruYHOutputShape =
+        [this.vadGruNumDirections, this.batchSize_, this.vadGruHiddenSize];
     this.vadGruYHTensor_ = await this.context_.createTensor({
       dataType: 'float32',
-      dimensions: [this.vadGruNumDirections, this.batchSize_, this.vadGruHiddenSize],
-      shape: [this.vadGruNumDirections, this.batchSize_, this.vadGruHiddenSize],
+      dimensions: vadGruYHOutputShape,
+      shape: vadGruYHOutputShape,
       usage: MLTensorUsage.READ,
     });
+    const noiseGruYHOutputShape =
+        [this.noiseGruNumDirections, this.batchSize_, this.noiseGruHiddenSize];
     this.noiseGruYHTensor_ = await this.context_.createTensor({
       dataType: 'float32',
-      dimensions: [this.noiseGruNumDirections, this.batchSize_, this.noiseGruHiddenSize],
-      shape: [this.noiseGruNumDirections, this.batchSize_, this.noiseGruHiddenSize],
+      dimensions: noiseGruYHOutputShape,
+      shape: noiseGruYHOutputShape,
       usage: MLTensorUsage.READ,
     });
+    const denoiseGruYHOutputShape = [
+      this.denoiseGruNumDirections,
+      this.batchSize_,
+      this.denoiseGruHiddenSize,
+    ];
     this.denoiseGruYHTensor_ = await this.context_.createTensor({
       dataType: 'float32',
-      dimensions: [this.denoiseGruNumDirections, this.batchSize_, this.denoiseGruHiddenSize],
-      shape: [this.denoiseGruNumDirections, this.batchSize_, this.denoiseGruHiddenSize],
+      dimensions: denoiseGruYHOutputShape,
+      shape: denoiseGruYHOutputShape,
       usage: MLTensorUsage.READ,
     });
@@ -209,9 +226,12 @@ export class RNNoise {
 
   async compute(inputs) {
     this.context_.writeTensor(this.inputTensor_, inputs.input);
-    this.context_.writeTensor(this.vadGruInitialHTensor_, inputs.vadGruInitialH);
-    this.context_.writeTensor(this.noiseGruInitialHTensor_, inputs.noiseGruInitialH);
-    this.context_.writeTensor(this.denoiseGruInitialHTensor_, inputs.denoiseGruInitialH);
+    this.context_.writeTensor(
+        this.vadGruInitialHTensor_, inputs.vadGruInitialH);
+    this.context_.writeTensor(
+        this.noiseGruInitialHTensor_, inputs.noiseGruInitialH);
+    this.context_.writeTensor(
+        this.denoiseGruInitialHTensor_, inputs.denoiseGruInitialH);
     const inputTensors = {
       'input': this.inputTensor_,
       'vadGruInitialH': this.vadGruInitialHTensor_,
@@ -226,10 +246,14 @@ export class RNNoise {
     };
     this.context_.dispatch(this.graph_, inputTensors, outputTensors);
     const results = {
-      'denoiseOutput': new Float32Array(await this.context_.readTensor(this.denoiseOutputTensor_)),
-      'vadGruYH': new Float32Array(await this.context_.readTensor(this.vadGruYHTensor_)),
-      'noiseGruYH': new Float32Array(await this.context_.readTensor(this.noiseGruYHTensor_)),
-      'denoiseGruYH': new Float32Array(await this.context_.readTensor(this.denoiseGruYHTensor_)),
+      'denoiseOutput': new Float32Array(
+          await this.context_.readTensor(this.denoiseOutputTensor_)),
+      'vadGruYH': new Float32Array(
+          await this.context_.readTensor(this.vadGruYHTensor_)),
+      'noiseGruYH': new Float32Array(
+          await this.context_.readTensor(this.noiseGruYHTensor_)),
+      'denoiseGruYH': new Float32Array(
+          await this.context_.readTensor(this.denoiseGruYHTensor_)),
     };
     return results;
   }
diff --git a/semantic_segmentation/deeplabv3_mnv2_nchw.js b/semantic_segmentation/deeplabv3_mnv2_nchw.js
index cd310122..353b347b 100644
--- a/semantic_segmentation/deeplabv3_mnv2_nchw.js
+++ b/semantic_segmentation/deeplabv3_mnv2_nchw.js
@@ -104,7 +104,7 @@ export class DeepLabV3MNV2Nchw {
       shape: this.outputShape,
       usage: MLTensorUsage.READ,
     });
-
+
     const conv0 = this.buildConv_(
         input, ['MobilenetV2_Conv_Conv2D', '', '551'], 'relu6',
         {strides, padding: [1, 1, 1, 1]});
     const conv1 = this.buildConv_(
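
Note (not part of the patch): every hunk above converges on the same MLTensor I/O flow — describe an input with both `dimensions` and `shape`, tag it with `MLTensorUsage.WRITE` before `createTensor()`, create output tensors with `MLTensorUsage.READ`, then `writeTensor()` / `dispatch()` / `readTensor()`. A minimal sketch of that flow, assuming only the calls visible in this diff, is below; the one-op graph and the [1, 4] shape are placeholders and do not correspond to any of the models touched here.

// Illustrative sketch of the MLTensor flow these changes standardize on.
// The relu-only graph and the [1, 4] shape are made up for illustration.
const context = await navigator.ml.createContext();
const builder = new MLGraphBuilder(context);

const inputDesc = {dataType: 'float32', dimensions: [1, 4], shape: [1, 4]};
const input = builder.input('input', inputDesc);
const output = builder.relu(input); // stand-in for a real model graph
const graph = await builder.build({'output': output});

// Input tensors are written from script; output tensors are read back.
inputDesc.usage = MLTensorUsage.WRITE;
const inputTensor = await context.createTensor(inputDesc);
const outputTensor = await context.createTensor({
  dataType: 'float32', dimensions: [1, 4], shape: [1, 4],
  usage: MLTensorUsage.READ,
});

context.writeTensor(inputTensor, new Float32Array([-1, 0, 1, 2]));
context.dispatch(graph, {'input': inputTensor}, {'output': outputTensor});
// readTensor() resolves to an ArrayBuffer, hence the Float32Array wrapper.
const result = new Float32Array(await context.readTensor(outputTensor));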