const mobilenetDemo = async () => {
  console.log('Loading model...');
  // Pretrained model
  mobilenet = await tf.loadLayersModel(MOBILENET_MODEL_PATH);
  // Load your own model
  // mobilenet = await tf.loadLayersModel('./mymobilenet/model.json');

  // Warmup the model. This isn't necessary, but makes the first prediction
  // faster. Call `dispose` to release the WebGL memory allocated for the
  // return value of `predict`.
  mobilenet.predict(tf.zeros([1, IMAGE_SIZE, IMAGE_SIZE, 3])).dispose();

  // Make a prediction through the locally hosted cat.jpg.
  const catElement = document.getElementById('img');
  if (catElement.complete && catElement.naturalHeight !== 0) {
    predict(catElement);
    catElement.style.display = '';
  } else {
    catElement.onload = () => {
      predict(catElement);
      catElement.style.display = '';
    };
  }
};
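The demo calls a `predict` helper that is not part of the snippet. A minimal sketch, assuming the standard MobileNet preprocessing (pixels scaled to [-1, 1]) and the `mobilenet` and `IMAGE_SIZE` globals above; top-k class decoding is omitted:

async function predict(imgElement) {
  const logits = tf.tidy(() => {
    // Convert the image to a tensor and normalize from [0, 255] to [-1, 1].
    // Assumes the <img> element is already IMAGE_SIZE x IMAGE_SIZE pixels.
    const img = tf.browser.fromPixels(imgElement).toFloat();
    const offset = tf.scalar(127.5);
    const normalized = img.sub(offset).div(offset);
    // Add a batch dimension: [1, IMAGE_SIZE, IMAGE_SIZE, 3].
    const batched = normalized.reshape([1, IMAGE_SIZE, IMAGE_SIZE, 3]);
    return mobilenet.predict(batched);
  });
  const probabilities = await logits.data();
  logits.dispose();
  return probabilities;
}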
updateModel = async () => {
  // Get the latest model from the server and refresh the one saved in IndexedDB.
  console.log('Updating the model: ' + INDEXEDDB_KEY);
  this.setState({ isDownloadingModel: true });
  this.model = await tf.loadLayersModel(MODEL_PATH);
  await this.model.save('indexeddb://' + INDEXEDDB_KEY);
  this.setState({
    isDownloadingModel: false,
    modelUpdateAvailable: false,
    showModelUpdateAlert: false,
    showModelUpdateSuccess: true
  });
};
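The snippet above refreshes the IndexedDB copy; the complementary read path is a common pattern. A sketch, assuming the same `INDEXEDDB_KEY` and `MODEL_PATH` constants: prefer the cached copy and fall back to the network:

async function loadCachedModel() {
  try {
    // Fast path: the copy saved by updateModel above.
    return await tf.loadLayersModel('indexeddb://' + INDEXEDDB_KEY);
  } catch (e) {
    // No cached copy yet: fetch from the server and cache it.
    const model = await tf.loadLayersModel(MODEL_PATH);
    await model.save('indexeddb://' + INDEXEDDB_KEY);
    return model;
  }
}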
async function loadTagger(name) {
  if (taggers[name] == null) {
    const url = modelUrls[name];
    try {
      taggers[name] = await tf.loadLayersModel(url);
      document.getElementById(name).disabled = false;
    } catch (e) {
      // Could not load that model. This is not necessarily an error,
      // as the user may not have trained all the available model types.
      console.log(`Could not load "${name}" model`);
    }
  }
  return taggers[name];
}
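A hypothetical call site, assuming the `taggers` and `modelUrls` maps are keyed by tagger name as in the snippet; `'sentiment'` is an assumed key:

const tagger = await loadTagger('sentiment');  // 'sentiment' is an assumed key in modelUrls
if (tagger) {
  // Dummy input built from the model's own input shape (null batch dim -> 1);
  // a real input's shape depends on how the tagger was trained.
  const output = tagger.predict(tf.zeros(tagger.inputs[0].shape.map(d => d ?? 1)));
  output.print();
  output.dispose();
}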
constructor() {
  // Initialize variables
  this.generatedSentence = document.getElementById("generated-sentence");
  this.diversity = 0.5;
  this.inputDiversity = document.getElementById("diversity");
  this.inputDiversity.onchange = (evt) => {
    this.diversity = evt.target.value / 40;
    console.log('diversity changed to', this.diversity);
  };
  this.inputSeed = document.getElementById("seed");
  this.generateButton = document.getElementById("generate-button");
  this.generateButton.onclick = () => {
    this.generateText();
  };
  tf.loadLayersModel('model/model.json').then((model) => {
    console.log('loaded model');
    this.model = model;
    this.enableGeneration();
  });
}
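The constructor references `this.enableGeneration()` and `this.generateText()`, which are not shown. A plausible minimal `enableGeneration` (the button-unlocking behavior is an assumption):

// Hypothetical: unlock the UI once the model has finished loading.
enableGeneration() {
  this.generateButton.disabled = false;
}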
async function loadTruncatedMobileNet() {
  const mobilenet = await tf.loadLayersModel(
      'https://storage.googleapis.com/tfjs-models/tfjs/mobilenet_v1_0.25_224/model.json');

  // Return a model that outputs an internal activation.
  const layer = mobilenet.getLayer('conv_pw_13_relu');
  return tf.model({inputs: mobilenet.inputs, outputs: layer.output});
}
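Truncating at `conv_pw_13_relu` yields a feature extractor rather than a classifier. A sketch of how such a model is typically used for transfer learning; `NUM_CLASSES` and `imageBatch` are illustrative placeholders:

const truncatedMobileNet = await loadTruncatedMobileNet();

// Small trainable head on top of the frozen MobileNet activations.
const head = tf.sequential({
  layers: [
    // Drop the null batch dimension from the activation shape.
    tf.layers.flatten({inputShape: truncatedMobileNet.outputs[0].shape.slice(1)}),
    tf.layers.dense({units: 100, activation: 'relu'}),
    tf.layers.dense({units: NUM_CLASSES, activation: 'softmax'})
  ]
});

// At prediction time, chain the two models.
const activation = truncatedMobileNet.predict(imageBatch);  // imageBatch: [n, 224, 224, 3]
const prediction = head.predict(activation);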
// `modelsUrls` head inferred from the usage below; the `keras` group
// parallels the `node` and `web` groups.
const modelsUrls = {
  keras: {
    classification: withPrefix('/models/pretrained/keras/classification/model.json'),
    embedding: withPrefix('/models/pretrained/keras/embedding/model.json'),
    ner: withPrefix('/models/pretrained/keras/ner/model.json')
  },
  node: {
    classification: withPrefix('/models/pretrained/node/classification/model.json'),
    embedding: withPrefix('/models/pretrained/node/embedding/model.json'),
    ner: withPrefix('/models/pretrained/node/ner/model.json')
  },
  web: {
    classification: withPrefix('/models/pretrained/web/classification/classification.json'),
    embedding: withPrefix('/models/pretrained/web/embedding/embedding.json'),
    ner: withPrefix('/models/pretrained/web/ner/ner.json')
  }
};

// Hypothetical wrapper name; the original enclosing function is not shown.
const loadPretrainedModels = async (backend) => {
  // `strict: false` relaxes the exact-match check between the saved topology
  // and the provided weights when loading the embedding model.
  const pretrainedEmbedding = await tf.loadLayersModel(modelsUrls[backend].embedding, { strict: false });
  const pretrainedClassifier = await tf.loadLayersModel(modelsUrls[backend].classification);
  const pretrainedNer = await tf.loadLayersModel(modelsUrls[backend].ner);
  return { pretrainedEmbedding, pretrainedClassifier, pretrainedNer };
};
export async function loadModelLocally() {
  return await tf.loadLayersModel(LOCAL_MODEL_URL);
}
async loadModel() {
  this.model = await tf.loadLayersModel(this.config.modelPath);
  this.modelReady = true;
  return this;
}
async function _loadModel(pathOrIOHandler, modelUrl) {
  if (modelUrl) {
    // Graph-format model: `pathOrIOHandler` is forwarded as the LoadOptions
    // argument of tf.loadGraphModel.
    return await tf.loadGraphModel(modelUrl, pathOrIOHandler);
  } else {
    return await tf.loadLayersModel(pathOrIOHandler);
  }
}
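Both branches of the helper in use; note that for graph models the first parameter doubles as the load options (URLs below are placeholders):

// Layers-format model from a plain URL or IOHandler.
const layersModel = await _loadModel('https://example.com/layers/model.json');

// Graph-format model; the first argument becomes the load options.
const graphModel = await _loadModel({fromTFHub: true},
    'https://tfhub.dev/some/tfjs-model/1');  // placeholder TF Hub URL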
async loadModel(model) {
  this.model = await tf.loadLayersModel(`${model}/model.json`);
  if (this.audioContext) {
    await this.processStream();
  } else {
    throw new Error('Could not access microphone - getUserMedia not available');
  }
  return this;
}
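The method requires `this.audioContext` to be set beforehand. A sketch of that setup using the standard Web Audio and getUserMedia APIs; the method name `initAudio` is hypothetical:

// Hypothetical setup method, run before loadModel.
async initAudio() {
  if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    this.stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    this.audioContext = new AudioContext();
  }
}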