// Train a simple two-layer softmax classifier and save it to disk.
// Assumes `tf` (@tensorflow/tfjs-node, for the file:// save target) and
// `numClasses` are defined in the surrounding scope.
const createModel = async (xTrain, yTrain, xTest, yTest) => {
  const params = {learningRate: 0.1, epochs: 40};

  // Define the topology of the model: two dense layers.
  const model = tf.sequential();
  model.add(tf.layers.dense({units: 10, activation: 'sigmoid', inputShape: [xTrain.shape[1]]}));
  model.add(tf.layers.dense({units: numClasses, activation: 'softmax'}));
  model.summary();

  const optimizer = tf.train.adam(params.learningRate);
  model.compile({
    optimizer: optimizer,
    loss: 'categoricalCrossentropy',
    metrics: ['accuracy'],
  });

  await model.fit(xTrain, yTrain, {
    epochs: params.epochs,
    validationData: [xTest, yTest],
  });

  await model.save('file://model');
  return model;
};
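// Usage sketch (not in the original source): a model saved with
// model.save('file://model') in Node.js can be restored later with
// tf.loadLayersModel pointed at the generated model.json file.
const loadSavedModel = async () => {
  const restored = await tf.loadLayersModel('file://model/model.json');
  restored.summary();
  return restored;
};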
// Autoencoder definition (fragment): build the encoder and decoder as separate
// tf.model graphs. `params` and `numFeatures` come from the surrounding scope.
const hiddenLayers = params.hiddenLayers;
const latentDim = params.latentDim;
const hiddenDim = params.hiddenDim;
const learningRate = params.learningRate;
const adamBeta1 = params.adamBeta1;
const outputActivation = "sigmoid";

// Specify encoder: a stack of relu dense layers narrowing down to the latent code.
const input = tf.input({ shape: [numFeatures] });
let encoderHidden = tf.layers.dense({ units: hiddenDim[0], activation: "relu" }).apply(input);
let i = 1;
while (i < hiddenDim.length) {
  encoderHidden = tf.layers.dense({ units: hiddenDim[i], activation: "relu" }).apply(encoderHidden);
  i++;
}
const z_ = tf.layers.dense({ units: latentDim }).apply(encoderHidden);
const encoder = tf.model({ inputs: input, outputs: z_, name: "encoder" });

// Specify decoder: mirror the encoder widths back out to the feature dimension.
const latentInput = tf.input({ shape: [latentDim] });
let decoderHidden = tf.layers.dense({ units: hiddenDim[hiddenDim.length - 1], activation: "relu" }).apply(latentInput);
let j = hiddenDim.length - 1;
while (j > 0) {
  j--;
  decoderHidden = tf.layers.dense({ units: hiddenDim[j], activation: "relu" }).apply(decoderHidden);
}
const decoderOutput = tf.layers.dense({ units: numFeatures, activation: outputActivation }).apply(decoderHidden);
const decoder = tf.model({ inputs: latentInput, outputs: decoderOutput, name: "decoder" });
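// A minimal sketch (not in the original excerpt) of wiring the encoder and decoder
// above into a single trainable autoencoder; the Adam hyperparameters reuse the
// `learningRate` and `adamBeta1` values read from `params`, and mean squared error
// is an assumed reconstruction loss.
const reconstruction = decoder.apply(encoder.apply(input));
const autoencoder = tf.model({ inputs: input, outputs: reconstruction, name: "autoencoder" });
autoencoder.compile({
  optimizer: tf.train.adam(learningRate, adamBeta1),
  loss: "meanSquaredError",
});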
// Stock-price LSTM (fragment): build 10-step lookback windows from `prices`,
// reshape them to [samples, 10, 1], and train a single-layer LSTM regressor.
// `prices`, `lookbackPrices`, `targets`, `dates`, `min`, `max`, `predictChart`,
// `minMaxInverseScaler`, and `openSnackbar` come from the surrounding scope.
for (let index = 10; index < prices.length; index++) {
  lookbackPrices[index - 10] = prices.slice(index - 10, index);
  targets.push(prices[index]);
}
let tfPrices = tf.tensor2d(lookbackPrices);
global.pred = tf.tensor2d(lookbackPrices[0], [1, 10]);
global.pred = tf.reshape(global.pred, [1, 10, 1]);
const tfTargets = tf.tensor1d(targets);
tfPrices = tf.reshape(tfPrices, [prices.length - 10, 10, 1]);

const model = tf.sequential();
model.add(tf.layers.lstm({ units: 32, inputShape: [10, 1] }));
model.add(tf.layers.dense({ units: 1, activation: 'linear' }));

const lr = parseFloat($('#txtLearningRate').val());
const opt = tf.train.adam(lr);
const loss = 'meanSquaredError';
openSnackbar("Compiling model");
// Track mean absolute error and mean squared error while training.
model.compile({ optimizer: opt, loss: loss, metrics: ['mae', 'mse'] });

async function fit() {
  let t = targets.map((el) => minMaxInverseScaler(el, min, max));
  t = t.slice(t.length - 100, t.length);
  predictChart.data.labels = dates.slice(dates.length - 100, dates.length);
  let loss = Infinity;
  let epochs = 1;
  const targetEpochs = parseFloat($('#txtNumEpochs').val());
  while (epochs < targetEpochs && window.startStop == 1) {
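    // The per-epoch body is truncated in the original excerpt. A minimal sketch of
    // one plausible iteration, assuming the `tfPrices`/`tfTargets` tensors built above:
    const history = await model.fit(tfPrices, tfTargets, { epochs: 1 });
    loss = history.history.loss[0];
    epochs++;
  }
}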
// Transfer-learning head (fragment of a class method): a small classifier stacked
// on top of the truncated MobileNet activations in `this.decapitatedMobilenet`.
buildRetrainingModel(denseUnits, numClasses, learningRate) {
  this.model = tf.sequential({
    layers: [
      // Flattens the input to a vector so we can use it in a dense layer. While
      // technically a layer, this only performs a reshape (and has no training
      // parameters).
      tf.layers.flatten({
        inputShape: this.decapitatedMobilenet.outputs[0].shape.slice(1)
      }),
      // Layer 1.
      tf.layers.dense({
        units: denseUnits,
        activation: "relu",
        kernelInitializer: "varianceScaling",
        useBias: true
      }),
      // Layer 2. The number of units of the last layer should correspond
      // to the number of classes we want to predict.
      tf.layers.dense({
        units: numClasses,
        kernelInitializer: "varianceScaling",
        // The original excerpt cuts off here; a softmax activation is assumed so
        // the layer emits class probabilities.
        activation: "softmax"
      })
    ]
  });
}
// Sequence classifier head (fragment): a single LSTM layer followed by a dense
// softmax output. `model`, `params`, and `outputDim` come from the surrounding
// scope; the commented-out block is a bidirectional variant of the same LSTM layer.
model.add(
  // tf.layers.bidirectional({
  //   layer: tf.layers.lstm({
  //     units: params.UNITS,
  //     dropout: params.DROPOUT_REG,
  //     recurrentDropout: params.DROPOUT_REG
  //   })
  // })
  tf.layers.lstm({
    units: params.UNITS,
    dropout: params.DROPOUT_REG,
    recurrentDropout: params.DROPOUT_REG
  })
);
model.add(
  tf.layers.dense({
    units: outputDim,
    activation: 'softmax'
  })
);
return model;
}
// Small MNIST-style CNN: three conv2d layers with max pooling, then a dense
// hidden layer and a 10-way softmax output.
function createConvModel() {
  const model = tf.sequential();
  model.add(tf.layers.conv2d({
    inputShape: [28, 28, 1],
    kernelSize: 3,
    filters: 16,
    activation: 'relu'
  }));
  model.add(tf.layers.maxPooling2d({ poolSize: 2, strides: 2 }));
  model.add(tf.layers.conv2d({ kernelSize: 3, filters: 32, activation: 'relu' }));
  model.add(tf.layers.maxPooling2d({ poolSize: 2, strides: 2 }));
  model.add(tf.layers.conv2d({ kernelSize: 3, filters: 32, activation: 'relu' }));
  model.add(tf.layers.flatten({}));
  model.add(tf.layers.dense({ units: 64, activation: 'relu' }));
  model.add(tf.layers.dense({ units: 10, activation: 'softmax' }));
  return model;
}
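// Usage sketch (not in the original source): compile and fit createConvModel()
// on MNIST-shaped tensors; `xTrain` ([n, 28, 28, 1]) and `yTrain` ([n, 10]) are
// assumed to be prepared elsewhere.
async function trainConvModel(xTrain, yTrain) {
  const convModel = createConvModel();
  convModel.compile({
    optimizer: tf.train.adam(0.001),
    loss: 'categoricalCrossentropy',
    metrics: ['accuracy'],
  });
  await convModel.fit(xTrain, yTrain, { epochs: 5, validationSplit: 0.15 });
  return convModel;
}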
// Iris classifier (fragment): one-hot encode the test-set species labels; the start
// of this excerpt is truncated, so the enclosing tf.tensor2d(...) call is
// reconstructed with an assumed variable name (`testYs`).
const testYs = tf.tensor2d(iris_test.map(item => [
  item.species === 'setosa' ? 1 : 0,
  item.species === 'virginica' ? 1 : 0,
  item.species === 'versicolor' ? 1 : 0
]), [iris_test.length, 3]);
const model = tf.sequential();
model.add(tf.layers.dense({
  inputShape: [4],
  activation: "sigmoid",
  units: 10,
  name: "layer1"
}));
model.add(tf.layers.dense({
  inputShape: [10],
  activation: "softmax",
  units: 3,
  name: "layer2"
}));
model.compile({
  loss: "categoricalCrossentropy",
  optimizer: tf.train.adam(),
  metrics: ['accuracy'],
});
model.summary();
// Training loop for the iris model above (the loop body is truncated in the
// original excerpt; see the sketch that follows).
async function train_data() {
  for (let i = 0; i < 15; i++) {
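    // A minimal sketch of one plausible iteration, assuming `xTrain` and `yTrain`
    // training tensors from the surrounding scope:
    const res = await model.fit(xTrain, yTrain, { epochs: 1, shuffle: true });
    console.log(`Iteration ${i}: loss = ${res.history.loss[0]}`);
  }
}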