Skip to content

Commit

Permalink
Use getValue/setValue instead of directly accessing heap.
Browse files Browse the repository at this point in the history
  • Loading branch information
satyajandhyala committed Jul 13, 2024
1 parent dc66046 commit be6c60c
Show file tree
Hide file tree
Showing 3 changed files with 44 additions and 48 deletions.
24 changes: 12 additions & 12 deletions js/web/lib/wasm/jsep/init.ts
Original file line number Diff line number Diff line change
Expand Up @@ -69,24 +69,24 @@ class ComputeContextImpl implements ComputeContext {
private customDataSize = 0;
constructor(private module: OrtWasmModule, private backend: WebGpuBackend, contextDataOffset: number) {
this.adapterInfo = backend.adapterInfo;
const heap = module.PTR_SIZE === 4 ? module.HEAPU32 : module.HEAPU64;

// extract context data
const ptrSize = module.PTR_SIZE;
let dataIndex = module.PTR_SIZE === 8 ? (contextDataOffset / 2 ** 3) : (contextDataOffset >> 2);
this.opKernelContext = Number(heap[dataIndex++]);
const inputCount = Number(heap[dataIndex++]);
this.outputCount = Number(heap[dataIndex++]);
this.customDataOffset = Number(heap[dataIndex++]);
this.customDataSize = Number(heap[dataIndex++]);
this.opKernelContext = module.getValue(dataIndex++ * ptrSize, 'i32');
const inputCount = module.getValue(dataIndex++ * ptrSize, 'i32');
this.outputCount = module.getValue(dataIndex++ * ptrSize, 'i32');
this.customDataOffset = module.getValue(dataIndex++ * ptrSize, 'i32');
this.customDataSize = module.getValue(dataIndex++ * ptrSize, 'i32');

const inputs: TensorView[] = [];
for (let i = 0; i < inputCount; i++) {
const dataType = Number(heap[dataIndex++]);
const data = Number(heap[dataIndex++]);
const dim = Number(heap[dataIndex++]);
const dataType = module.getValue(dataIndex++ * ptrSize, 'i32');
const data = module.getValue(dataIndex++ * ptrSize, '*');
const dim = module.getValue(dataIndex++ * ptrSize, 'i32');
const dims: number[] = [];
for (let d = 0; d < dim; d++) {
dims.push(Number(heap[dataIndex++]));
dims.push(module.getValue(dataIndex++ * ptrSize, 'i32'));
}
inputs.push(new TensorViewImpl(module, dataType, data, dims));
}
Expand Down Expand Up @@ -130,9 +130,9 @@ class ComputeContextImpl implements ComputeContext {
try {
const ptrSize = this.module.PTR_SIZE;
const data = this.module.stackAlloc((1 + dims.length) * ptrSize /* sizeof(size_t) */);
this.module.setValue(data, dims.length, '*');
this.module.setValue(data, dims.length, 'i32');
for (let i = 0; i < dims.length; i++) {
this.module.setValue(data + ptrSize * (i + 1), dims[i], '*');
this.module.setValue(data + ptrSize * (i + 1), dims[i], 'i32');
}
return this.module._JsepOutput!(this.opKernelContext, index, data);
} catch (e) {
Expand Down
23 changes: 9 additions & 14 deletions js/web/lib/wasm/wasm-core-impl.ts
Original file line number Diff line number Diff line change
Expand Up @@ -450,12 +450,11 @@ export const prepareInputOutputTensor =
dataByteLength = 4 * data.length;
rawData = wasm._malloc(dataByteLength);
allocs.push(rawData);
let dataIndex = rawData / ptrSize;
for (let i = 0; i < data.length; i++) {
if (typeof data[i] !== 'string') {
throw new TypeError(`tensor data at index ${i} is not a string`);
}
wasm.HEAPU32[dataIndex++] = allocWasmString(data[i], allocs);
wasm.setValue(rawData + i * ptrSize, allocWasmString(data[i], allocs), '*');
}
} else {
dataByteLength = data.byteLength;
Expand All @@ -468,7 +467,7 @@ export const prepareInputOutputTensor =
const stack = wasm.stackSave();
const dimsOffset = wasm.stackAlloc(ptrSize * dims.length);
try {
dims.forEach((d, index) => wasm.setValue(dimsOffset + (index * ptrSize), d, '*'));
dims.forEach((d, index) => wasm.setValue(dimsOffset + (index * ptrSize), d, 'i32'));
const tensor = wasm._OrtCreateTensor(
tensorDataTypeStringToEnum(dataType), rawData, dataByteLength, dimsOffset, dims.length,
dataLocationStringToEnum(location));
Expand Down Expand Up @@ -532,17 +531,13 @@ export const run = async(
enableGraphCapture);
}

let inputValuesIndex = inputValuesOffset / ptrSize;
let inputNamesIndex = inputNamesOffset / ptrSize;
let outputValuesIndex = outputValuesOffset / ptrSize;
let outputNamesIndex = outputNamesOffset / ptrSize;
for (let i = 0; i < inputCount; i++) {
wasm.HEAPU64[inputValuesIndex++] = BigInt(inputTensorHandles[i]);
wasm.HEAPU64[inputNamesIndex++] = BigInt(inputNamesUTF8Encoded[inputIndices[i]]);
wasm.setValue(inputValuesOffset + i * ptrSize, inputTensorHandles[i], 'i64');
wasm.setValue(inputNamesOffset + i * ptrSize, inputNamesUTF8Encoded[inputIndices[i]], 'i64');
}
for (let i = 0; i < outputCount; i++) {
wasm.HEAPU64[outputValuesIndex++] = BigInt(outputTensorHandles[i]);
wasm.HEAPU64[outputNamesIndex++] = BigInt(outputNamesUTF8Encoded[outputIndices[i]]);
wasm.setValue(outputValuesOffset + i * ptrSize, outputTensorHandles[i], 'i64');
wasm.setValue(outputNamesOffset + i * ptrSize, outputNamesUTF8Encoded[outputIndices[i]], 'i64');
}

if (!BUILD_DEFS.DISABLE_JSEP && ioBindingState && !inputOutputBound) {
Expand Down Expand Up @@ -646,10 +641,10 @@ export const run = async(
throw new Error('String tensor is not supported on GPU.');
}
const stringData: string[] = [];
let dataIndex = dataOffset / ptrSize;
for (let i = 0; i < size; i++) {
const offset = wasm.HEAPU32[dataIndex++];
const maxBytesToRead = i === size - 1 ? undefined : wasm.HEAPU32[dataIndex] - offset;
const offset = wasm.getValue(dataOffset + i * ptrSize, '*');
const nextOffset = wasm.getValue(dataOffset + (i + 1) * ptrSize, '*');
const maxBytesToRead = i === size - 1 ? undefined : nextOffset - offset;
stringData.push(wasm.UTF8ToString(offset, maxBytesToRead));
}
output.push([type, dims, stringData, 'cpu']);
Expand Down
45 changes: 23 additions & 22 deletions js/web/lib/wasm/wasm-training-core-impl.ts
Original file line number Diff line number Diff line change
Expand Up @@ -62,12 +62,13 @@ const getModelInputOutputCount = (trainingSessionId: number, isEvalModel: boolea
const wasm = getInstance();
const stack = wasm.stackSave();
try {
const dataOffset = wasm.stackAlloc(8);
const ptrSize = wasm.PTR_SIZE;
const dataOffset = wasm.stackAlloc(2 * ptrSize);
if (wasm._OrtTrainingGetModelInputOutputCount) {
const errorCode =
wasm._OrtTrainingGetModelInputOutputCount(trainingSessionId, dataOffset, dataOffset + 4, isEvalModel);
wasm._OrtTrainingGetModelInputOutputCount(trainingSessionId, dataOffset, dataOffset + ptrSize, isEvalModel);
ifErrCodeCheckLastError(errorCode, 'Can\'t get session input/output count.');
return [wasm.HEAP32[dataOffset / 4], wasm.HEAP32[dataOffset / 4 + 1]];
return [wasm.getValue(dataOffset, 'i32'), wasm.getValue(dataOffset + ptrSize, 'i32')];
} else {
throw new Error(NO_TRAIN_FUNCS_MSG);
}
Expand Down Expand Up @@ -170,10 +171,10 @@ const createAndAllocateTensors =

// moves to heap
const wasm = getInstance();
const valuesOffset = wasm.stackAlloc(count * 4);
let valuesIndex = valuesOffset / 4;
const ptrSize = wasm.PTR_SIZE;
const valuesOffset = wasm.stackAlloc(count * ptrSize);
for (let i = 0; i < count; i++) {
wasm.HEAPU32[valuesIndex++] = tensorHandles[i];
wasm.setValue(valuesOffset + i * ptrSize, tensorHandles[i], '*');
}

return valuesOffset;
Expand All @@ -191,10 +192,11 @@ const moveOutputToTensorMetadataArr =
(outputValuesOffset: number, outputCount: number, outputTensorHandles: number[],
outputTensors: Array<TensorMetadata|null>) => {
const wasm = getInstance();
const ptrSize = wasm.PTR_SIZE;
const output: TensorMetadata[] = [];

for (let i = 0; i < outputCount; i++) {
const tensor = wasm.HEAPU32[outputValuesOffset / 4 + i];
const tensor = wasm.getValue(outputValuesOffset + i * ptrSize, '*');
if (tensor === outputTensorHandles[i]) {
// output tensor is pre-allocated. no need to copy data.
output.push(outputTensors[i]!);
Expand All @@ -211,14 +213,13 @@ const moveOutputToTensorMetadataArr =
tensor, tensorDataOffset, tensorDataOffset + 4, tensorDataOffset + 8, tensorDataOffset + 12);
ifErrCodeCheckLastError(errorCode, `Can't access output tensor data on index ${i}.`);

let tensorDataIndex = tensorDataOffset / 4;
const dataType = wasm.HEAPU32[tensorDataIndex++];
dataOffset = wasm.HEAPU32[tensorDataIndex++];
const dimsOffset = wasm.HEAPU32[tensorDataIndex++];
const dimsLength = wasm.HEAPU32[tensorDataIndex++];
const dataType = wasm.getValue(tensorDataOffset, '*');
dataOffset = wasm.getValue(tensorDataOffset + ptrSize, '*');
const dimsOffset = wasm.getValue(tensorDataOffset + 2 * ptrSize, '*');
const dimsLength = wasm.getValue(tensorDataOffset + 3 * ptrSize, '*');
const dims = [];
for (let i = 0; i < dimsLength; i++) {
dims.push(wasm.HEAPU32[dimsOffset / 4 + i]);
dims.push(wasm.getValue(dimsOffset + i * ptrSize, '*'));
}
wasm._OrtFree(dimsOffset);

Expand All @@ -227,10 +228,10 @@ const moveOutputToTensorMetadataArr =

if (type === 'string') {
const stringData: string[] = [];
let dataIndex = dataOffset / 4;
for (let i = 0; i < size; i++) {
const offset = wasm.HEAPU32[dataIndex++];
const maxBytesToRead = i === size - 1 ? undefined : wasm.HEAPU32[dataIndex] - offset;
const offset = wasm.getValue(dataOffset + i * ptrSize, '*');
const nextOffset = wasm.getValue(dataOffset + (i + 1) * ptrSize, '*');
const maxBytesToRead = i === size - 1 ? undefined : nextOffset - offset;
stringData.push(wasm.UTF8ToString(offset, maxBytesToRead));
}
output.push([type, dims, stringData, 'cpu']);
Expand Down Expand Up @@ -396,14 +397,14 @@ export const runEvalStep = async(
export const getParametersSize = (trainingSessionId: number, trainableOnly: boolean): number => {
const wasm = getInstance();
const stack = wasm.stackSave();

const ptrSize = wasm.PTR_SIZE;
try {
const sizeOffset = wasm.stackAlloc(4);
const sizeOffset = wasm.stackAlloc(ptrSize);
if (wasm._OrtTrainingGetParametersSize) {
const errorCode = wasm._OrtTrainingGetParametersSize(trainingSessionId, sizeOffset, trainableOnly);
ifErrCodeCheckLastError(errorCode, 'Can\'t get parameters size');

return wasm.HEAP32[sizeOffset / 4];
return wasm.getValue(sizeOffset, '*');
} else {
throw new Error(NO_TRAIN_FUNCS_MSG);
}
Expand Down Expand Up @@ -432,7 +433,7 @@ export const getContiguousParameters =

const dimsOffset = wasm.stackAlloc(4);
const dimsIndex = dimsOffset / 4;
wasm.HEAP32[dimsIndex] = parametersSize;
wasm.setValue(dimsIndex, parametersSize, '*');

try {
// wraps allocated array in a tensor
Expand Down Expand Up @@ -488,8 +489,8 @@ export const loadParametersBuffer =
wasm.HEAPU8.set(buffer, bufferOffset);

// allocates and handles moving dimensions information to WASM memory
const dimsOffset = wasm.stackAlloc(4);
wasm.HEAP32[dimsOffset / 4] = bufferCount;
const dimsOffset = wasm.stackAlloc(wasm.PTR_SIZE);
wasm.setValue(dimsOffset, bufferCount, '*');
const dimsLength = 1;
let tensor = 0;

Expand Down

0 comments on commit be6c60c

Please sign in to comment.