// Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
/**
 * Repacks flat RGBA pixel data into a planar BGR float tensor of shape
 * [1, 3, width, height], scaling each value by 1/128 and shifting by -1.
 *
 * NOTE(review): the ndarray is built as [width, height, 4] — confirm the
 * caller supplies data in that layout (canvas ImageData is row-major
 * height-first).
 */
preprocess(data, width, height) {
  const src = ndarray(new Float32Array(data), [width, height, 4]);
  const dst = ndarray(new Float32Array(width * height * 3), [1, 3, width, height]);
  // Scale to roughly [-1, 1): v/128 - 1.
  ops.divseq(src, 128.0);
  ops.subseq(src, 1.0);
  // Copy channels in reversed order (RGB source index 2-c -> plane c),
  // dropping alpha.
  for (let c = 0; c < 3; c += 1) {
    ops.assign(dst.pick(0, c, null, null), src.pick(null, null, 2 - c));
  }
  return new onnx.Tensor(dst.data, 'float32', [1, 3, width, height]);
}
}
/**
 * Decodes raw YOLO output and draws one labelled rectangle per detection.
 *
 * Reinterprets the flat output as [1, 125, 13, 13], transposes it to
 * channels-last, runs the YOLO box decoder (20 classes), and calls
 * this.drawRect for each box with a "<class> Confidence ... Time ..." label.
 *
 * @param tensor raw model output; its data is read as a Float32Array.
 * @param inferenceTime inference duration in ms, embedded in each label.
 */
async postprocess(tensor: Tensor, inferenceTime: number) {
  try {
    const originalOutput = new Tensor(tensor.data as Float32Array, 'float32', [1, 125, 13, 13]);
    const outputTensor = yoloTransforms.transpose(originalOutput, [0, 2, 3, 1]);
    // Decode bounding boxes for a 20-class model.
    const boxes = await yolo.postprocess(outputTensor, 20);
    boxes.forEach(box => {
      const {
        top, left, bottom, right, classProb, className,
      } = box;
      this.drawRect(left, top, right - left, bottom - top,
        `${className} Confidence: ${Math.round(classProb * 100)}% Time: ${inferenceTime.toFixed(1)}ms`);
    });
  } catch (e) {
    // Fix: the original swallowed the actual error, making failures
    // undiagnosable. Log it before showing the generic user-facing alert.
    console.error('postprocess failed:', e);
    alert('Model is not valid!');
  }
}
/**
 * Extracts the canvas pixels and repacks them as a planar CHW float
 * tensor of shape [1, 3, width, height], dropping the alpha channel.
 *
 * NOTE(review): pixel values are copied as-is (0-255, RGB plane order)
 * with no normalization here — confirm the model expects raw pixels.
 */
preprocess(ctx: CanvasRenderingContext2D): Tensor {
  const imageData = ctx.getImageData(0, 0, ctx.canvas.width, ctx.canvas.height);
  const { data, width, height } = imageData;
  const dataTensor = ndarray(new Float32Array(data), [width, height, 4]);
  const dataProcessedTensor = ndarray(new Float32Array(width * height * 3), [1, 3, width, height]);
  ops.assign(dataProcessedTensor.pick(0, 0, null, null), dataTensor.pick(null, null, 0));
  ops.assign(dataProcessedTensor.pick(0, 1, null, null), dataTensor.pick(null, null, 1));
  ops.assign(dataProcessedTensor.pick(0, 2, null, null), dataTensor.pick(null, null, 2));
  // Fix: hand the backing buffer to the Tensor directly instead of
  // allocating a second Float32Array and copying into it — same pattern
  // as the sibling preprocess(data, width, height) method.
  return new Tensor(dataProcessedTensor.data, 'float32', [1, 3, width, height]);
}
/**
 * Creates an inference session for the requested backend, loads the model
 * from modelPath, and records the expected input image size.
 */
async init(backend, modelPath, imageSize) {
  this.imageSize = imageSize;
  this.model = new onnx.InferenceSession({ backendHint: backend });
  await this.model.loadModel(modelPath);
}
// NOTE(review): this body looks like truncation/corruption residue — it
// ignores `data`, never runs inference, and merely re-points
// `this.session` at `this.gpuSession` (which may be undefined when the
// wasm backend is active). Recover the original implementation from
// version control before relying on this method.
async runModel(data) {
this.session = this.gpuSession;
return;
}
/**
 * Lazily creates (and caches) an InferenceSession for the currently
 * selected backend ('webgl' -> this.gpuSession, 'wasm' -> this.cpuSession)
 * and loads this.modelFile into it. If a cached session for the active
 * backend already exists it is reused and the method returns immediately.
 *
 * NOTE(review): the original region contained several overlapping,
 * partially duplicated copies of this method (corrupted paste, with new
 * `async initSession()` headers opening inside earlier copies' catch
 * blocks). This is the single reconstructed version that all the copies
 * agree on — verify against version control, and check that no leftover
 * fragment of the duplicates remains after this method in the file.
 *
 * @throws Error('Error: Backend not supported. ') when loading fails; the
 *   cached session for the failing backend is discarded so a later call
 *   can retry from scratch.
 */
async initSession() {
  this.sessionRunning = false;
  this.modelLoadingError = false;
  if (this.sessionBackend === 'webgl') {
    if (this.gpuSession) {
      this.session = this.gpuSession;
      return;
    }
    this.modelLoading = true;
    this.modelInitializing = true;
    this.gpuSession = new InferenceSession({ backendHint: this.sessionBackend });
    this.session = this.gpuSession;
  }
  if (this.sessionBackend === 'wasm') {
    if (this.cpuSession) {
      this.session = this.cpuSession;
      return;
    }
    this.modelLoading = true;
    this.modelInitializing = true;
    this.cpuSession = new InferenceSession({ backendHint: this.sessionBackend });
    this.session = this.cpuSession;
  }
  try {
    await this.session!.loadModel(this.modelFile);
  } catch (e) {
    // Loading failed: clear the loading flags and drop the cached session
    // for this backend so the next attempt recreates it.
    this.modelLoading = false;
    this.modelInitializing = false;
    if (this.sessionBackend === 'webgl') {
      this.gpuSession = undefined;
    } else {
      this.cpuSession = undefined;
    }
    throw new Error('Error: Backend not supported. ');
  }
  this.modelLoading = false;
  this.modelInitializing = false;
}