    wordExampleIndices.push(i);
  }
}
if (noiseExampleIndices.length === 0) {
  throw new Error(
      `Cannot perform augmentation by mixing with noise when ` +
      `there is no example with label ${BACKGROUND_NOISE_TAG}`);
}
const mixedXTensors: Array<tf.Tensor|Float32Array> = [];
const mixedLabelIndices: number[] = [];
for (const index of wordExampleIndices) {
  // Randomly sample from the noise examples, with replacement.
  const noiseIndex =
      noiseExampleIndices[getRandomInteger(0, noiseExampleIndices.length)];
  const signalTensor = isTypedArray ?
      tf.tensor1d(xs[index] as Float32Array) :
      xs[index] as tf.Tensor;
  const noiseTensor = isTypedArray ?
      tf.tensor1d(xs[noiseIndex] as Float32Array) :
      xs[noiseIndex] as tf.Tensor;
  // Mix the word example with the noise at the given ratio, then renormalize.
  const mixed: tf.Tensor =
      tf.tidy(() => normalize(signalTensor.add(noiseTensor.mul(ratio))));
  if (isTypedArray) {
    mixedXTensors.push(mixed.dataSync() as Float32Array);
  } else {
    mixedXTensors.push(mixed);
  }
  mixedLabelIndices.push(labelIndices[index]);
}
console.log(
    `Data augmentation: mixing noise: added ${mixedXTensors.length} ` +
    `examples`);
}
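
// The augmentation code above assumes two helpers that are not shown in the
// snippet. Minimal sketches of their assumed behavior (not necessarily the
// library's exact implementations):

// Uniformly sample an integer in the range [min, max).
function getRandomInteger(min: number, max: number): number {
  return Math.floor(Math.random() * (max - min)) + min;
}

// Rescale a waveform tensor to zero mean and unit variance.
function normalize(x: tf.Tensor): tf.Tensor {
  return tf.tidy(() => {
    const {mean, variance} = tf.moments(x);
    // The small epsilon guards against division by zero on all-silent input.
    return x.sub(mean).div(variance.sqrt().add(1e-8));
  });
}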
if (key.includes("bn")) {
// set gamma to scaled weights for odd layers
let bnGammaShape = layers[key].getWeights()[0].shape;
let index = parseInt(key.substring(2));
let scaleWeightKey = "scale" + index;
if (processedWeights[scaleWeightKey]) {
w.push(tf.tensor1d(processedWeights[scaleWeightKey]['scale'], 'float32'));
} else {
w.push(tf.tensor1d(new Array(bnGammaShape[0]).fill(1), 'float32'));
}
// weight index 1 = beta - 0 (due to Affine = false)
let bnBetaShape = layers[key].getWeights()[1].shape;
w.push(tf.tensor1d(new Array(bnBetaShape[0]).fill(0), 'float32'));
// weight indes 2 = moving_mean
w.push(tf.tensor1d(processedWeights[key]['running_mean'], 'float32'));
// weight index 3 = moving_variance
w.push(tf.tensor1d(processedWeights[key]['running_var'], 'float32'));
}
if (key.includes("output")) {
// weight index 0 = kernel
let denseKernelShape = layers[key].getWeights()[0].shape;
let denseKernel = matrix.transpose2d(processedWeights[key]['weight']);
w.push(tf.tensor2d(denseKernel, denseKernelShape, 'float32'));
// weight index 1 = bias
w.push(tf.tensor1d(processedWeights[key]['bias'], 'float32'));
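
// `matrix.transpose2d` above is assumed to flip a row-major 2D array:
// frameworks like PyTorch store dense weights as [outUnits, inUnits], while
// tf.layers.dense expects a kernel of shape [inUnits, outUnits]. A
// hypothetical sketch of such a helper:
function transpose2d(m: number[][]): number[][] {
  return m[0].map((_, col) => m.map(row => row[col]));
}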
return tf.tidy(() => {
  // Sample a random latent vector z.
  const z = tf.randomNormal([1, this.nLatents], 0, 1, 'float32');
  // Get the one-hot encoding for the pitch conditioning.
  const pitchIdx = tf.tensor1d([pitch - this.minMidiPitch], 'int32');
  const pitchOneHot = tf.oneHot(pitchIdx, this.midiPitches);
  // Concatenate and add width and height dimensions.
  const cond = tf.concat([z, pitchOneHot], 1).expandDims(1).expandDims(1) as
      tf.Tensor4D;
  const specgrams = this.predict(cond, 1);
  return specgrams;
});
}
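
// Shape walk-through, with hypothetical sizes nLatents = 256 and
// midiPitches = 61:
//   z: [1, 256], pitchOneHot: [1, 61]  -> concat: [1, 317]
//   -> expandDims twice: [1, 1, 1, 317], the 4-D conditioning tensor that
//      the generator's predict() consumes.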
for (let i = 0; i < numExamples; ++i) {
  shuffledData.push(data[indices[i]]);
  shuffledTargets.push(targets[indices[i]]);
}
// Split the data into a training set and a test set, based on `testSplit`.
const numTestExamples = Math.round(numExamples * testSplit);
const numTrainExamples = numExamples - numTestExamples;
const xDims = shuffledData[0].length;
const xs = tf.tensor2d(shuffledData, [numExamples, xDims]);
// Create a 1D `tf.Tensor` to hold the labels, and convert the number label
// from the set {0, 1, 2} into one-hot encoding (e.g., 0 --> [1, 0, 0]).
const ys = tf.oneHot(tf.tensor1d(shuffledTargets).toInt(), numClasses);
const xTrain = xs.slice([0, 0], [numTrainExamples, xDims]);
const xTest = xs.slice([numTrainExamples, 0], [numTestExamples, xDims]);
const yTrain = ys.slice([0, 0], [numTrainExamples, numClasses]);
// Offset the test labels by `numTrainExamples` so they line up with `xTest`.
const yTest = ys.slice([numTrainExamples, 0], [numTestExamples, numClasses]);
return [xTrain, yTrain, xTest, yTest];
}
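
// The `indices` array used for shuffling above is assumed to be a random
// permutation of [0, numExamples). A minimal sketch using a TF.js utility:
function shuffledIndices(numExamples: number): number[] {
  // createShuffledIndices returns a Uint32Array permutation of 0..n-1.
  return Array.from(tf.util.createShuffledIndices(numExamples));
}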
const model = tf.sequential();
model.add(tf.layers.dense({
  inputShape: [1],
  units: 1,
}));
model.compile({
loss: "meanSquaredError",
optimizer: "sgd",
});
// Training data for y = 2x - 1, stored in typed arrays.
const xArr = new Float32Array([-1, 0, 1, 2, 3, 4]);
const yArr = new Float32Array([-3, -1, 1, 3, 5, 7]);
// The dense layer expects rank-2 [numExamples, 1] tensors, so reshape the
// flat arrays accordingly.
const xs = tf.tensor2d(xArr, [6, 1]);
const ys = tf.tensor2d(yArr, [6, 1]);
const epochs = 10;
const h = await model.fit(xs, ys, {
epochs
});
console.log("last loss:", h.history.loss[epochs - 1]);
console.log("\nTEST: PREDICTING...");
const out = model.predict(tf.tensor2d([10], [1, 1]));
// With enough training this prints a value close to 2 * 10 - 1 = 19.
console.log(await out.data());
}
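
// Note: in TypeScript, model.predict is typed as returning
// tf.Tensor | tf.Tensor[]; with a single-output model like this one, cast
// the result (e.g. `model.predict(...) as tf.Tensor`) before calling .data().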
const slotTags: tf.Tensor3D = tf.tidy(() => {
  const y2sentences: tf.Tensor2D[] = [];
  for (const wordsSlotId of trainY2Chunks[index]) {
    // Right-pad each sentence's slot ids to the maximum sentence length,
    // then one-hot encode them to [maxWordsPerSentence, slotsLength].
    const slotIds = tf
        .tensor1d(wordsSlotId, 'int32')
        .pad([[0, this.datasetParams.maxWordsPerSentence - wordsSlotId.length]]);
    const ohe = tf.oneHot(slotIds, slotsLength).asType('float32') as tf.Tensor2D;
    slotIds.dispose();
    y2sentences.push(ohe);
  }
  // Stack the per-sentence matrices into a [sentences, words, slots] tensor,
  // then release the intermediates.
  const stack = tf.stack(y2sentences) as tf.Tensor3D;
  y2sentences.forEach(s => s.dispose());
  return stack;
});
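
// Shape sketch for slotTags, with hypothetical sizes
// maxWordsPerSentence = 5 and slotsLength = 3: a sentence with slot ids
// [2, 1] becomes
//   pad    -> [2, 1, 0, 0, 0]                                 ([5])
//   oneHot -> [[0,0,1], [0,1,0], [1,0,0], [1,0,0], [1,0,0]]   ([5, 3])
// and stacking N such sentences yields the [N, 5, 3] target tensor.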
await this.model.fit([intentLabels, embeddedSentenceWords], slotTags, {
  // (training options elided)
});
function imageTensorFromFlatArray(flat: number[], width: number, height: number) {
  return tf.tidy(() => {
    // Interpret the flat RGBA buffer as a [1, height, width, 4] image.
    const rgba = tf.tensor1d(flat).reshape([1, height, width, 4]);
    // Drop the alpha channel...
    const rgb = rgba.slice([0, 0, 0, 0], [1, height, width, 3]);
    // ...and swap the height and width axes.
    return tf.transpose(rgb, [0, 2, 1, 3]);
  });
}
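
// Usage sketch with a hypothetical 2-pixel RGBA buffer (width 2, height 1):
const t = imageTensorFromFlatArray(
    [255, 0, 0, 255, 0, 255, 0, 255], /* width */ 2, /* height */ 1);
console.log(t.shape);  // [1, 2, 1, 3] -- width and height are swapped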
return tf.tidy(() => {
  // Convert the 1-based card values to 0-based indices, one-hot encode each
  // card, then sum over the hand axis to get a count per card value.
  const player1Hand = tf.tensor1d(gameState.player1Hand, 'int32');
  const handOneHot = tf.oneHot(
      tf.sub(player1Hand, tf.scalar(1, 'int32')),
      game.GAME_STATE.max_card_value);
  const features = tf.sum(handOneHot, 0);
  const label = tf.tensor1d([gameState.player1Win]);
  return {xs: features, ys: label};
});
}
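
// A minimal sketch (hypothetical hand) of the multi-hot encoding above,
// assuming max_card_value = 4:
const hand = tf.tensor1d([1, 3, 3], 'int32');
const counts = tf.sum(tf.oneHot(tf.sub(hand, tf.scalar(1, 'int32')), 4), 0);
counts.print();  // [1, 0, 2, 0]: one "1" card and two "3" cards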
export function padSequences(sequences, maxSeqLength) {
  let paddedSequences = []
  for (let sequence of sequences) {
    // Left-pad each sequence with zeros up to `maxSeqLength`.
    let t = tf.tensor1d(sequence).pad([[maxSeqLength - sequence.length, 0]])
    paddedSequences.push(t)
  }
  // Stack into a single [numSequences, maxSeqLength] tensor, then release
  // the per-sequence tensors.
  const stacked = tf.stack(paddedSequences)
  paddedSequences.forEach(t => t.dispose())
  return stacked
}
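
// Example: pad two sequences to length 4 (zeros are added on the left).
const batch = padSequences([[1, 2], [3, 4, 5]], 4)
batch.print()  // [[0, 0, 1, 2], [0, 3, 4, 5]]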