let numTensors;
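// (Assumption: `numTensors` is assigned further down, e.g. from
// tf.memory().numTensors, to verify that the training steps do not leak
// tensors; the assignment is not part of this excerpt.)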
let logWriter;
if (args.logDir) {
  console.log(`Logging to tensorboard at logdir: ${args.logDir}`);
  logWriter = tf.node.summaryFileWriter(args.logDir);
}
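// The written summaries can be inspected during or after training by
// pointing TensorBoard at the same directory:
//   tensorboard --logdir <args.logDir>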
let step = 0;
for (let epoch = 0; epoch < args.epochs; ++epoch) {
  // Write some metadata to disk at the beginning of every epoch.
  fs.writeFileSync(
      metadataPath,
      JSON.stringify(makeMetadata(args.epochs, epoch, false)));
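  // (Assumption: the third argument to makeMetadata flags whether training
  // has finished; it stays `false` during the epochs, with a final `true`
  // write expected after the loop.)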
  const tBatchBegin = tf.util.now();
  const numBatches = Math.ceil(xTrain.shape[0] / args.batchSize);
  for (let batch = 0; batch < numBatches; ++batch) {
    const actualBatchSize = (batch + 1) * args.batchSize >= xTrain.shape[0] ?
        (xTrain.shape[0] - batch * args.batchSize) :
        args.batchSize;
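    // Example: with the 60000 MNIST training images and batchSize = 64,
    // batches 0-936 each hold 64 images and the final batch (index 937)
    // holds the remaining 60000 - 937 * 64 = 32 images.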
    const dLoss = await trainDiscriminatorOneStep(
        xTrain, yTrain, batch * args.batchSize, actualBatchSize,
        args.latentSize, generator, discriminator);

    // We use 2 * actualBatchSize here so that the generator is optimized
    // over the same number of images per step as the discriminator.
    // (Assumption: the argument list below is inferred from the
    // discriminator call above; `combined` stands for the combined
    // generator-plus-discriminator model built elsewhere.)
    const gLoss = await trainCombinedModelOneStep(
        2 * actualBatchSize, args.latentSize, combined);
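    // Assumed continuation: report the per-batch losses and, when a log
    // directory was given, write them to TensorBoard via the writer and
    // step counter declared above.
    console.log(
        `epoch ${epoch + 1}/${args.epochs} batch ${batch + 1}/${numBatches}: ` +
        `dLoss = ${dLoss.toFixed(6)}, gLoss = ${gLoss.toFixed(6)}`);
    if (logWriter != null) {
      logWriter.scalar('dLoss', dLoss, step);
      logWriter.scalar('gLoss', gLoss, step);
      step++;
    }
  }
  // Assumed end-of-epoch bookkeeping, using the timestamp captured above.
  console.log(
      `Epoch ${epoch + 1} took ` +
      `${((tf.util.now() - tBatchBegin) / 1e3).toFixed(2)} s.`);
}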

/**
 * Build the generator sub-model.
 * @param {number} latentSize Size of the latent (noise) vector fed to the
 *   generator.
 */
function buildGenerator(latentSize) {
  tf.util.assert(
      latentSize > 0 && Number.isInteger(latentSize),
      `Expected latent-space size to be a positive integer, but ` +
          `got ${latentSize}.`);
  const cnn = tf.sequential();

  // The number of units is chosen so that when the output is reshaped
  // and fed through the subsequent conv2dTranspose layers, the tensor
  // that comes out at the end has the exact shape that matches MNIST
  // images ([28, 28, 1]).
  cnn.add(tf.layers.dense(
      {units: 3 * 3 * 384, inputShape: [latentSize], activation: 'relu'}));
  cnn.add(tf.layers.reshape({targetShape: [3, 3, 384]}));
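  // Shape check: 3 * 3 * 384 = 3456 dense units reshape exactly into a
  // [3, 3, 384] tensor.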
  // Upsample from [3, 3, ...] to [7, 7, ...].
  // (The settings below are an assumed completion of this layer; see the
  // size arithmetic after the call.)
  cnn.add(tf.layers.conv2dTranspose({
    filters: 192,
    kernelSize: 5,
    strides: 1,
    padding: 'valid',
    activation: 'relu',
    kernelInitializer: 'glorotNormal'
  }));
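  // Size check: with 'valid' padding a transposed convolution yields
  // outputSize = (inputSize - 1) * strides + kernelSize
  //            = (3 - 1) * 1 + 5 = 7,
  // matching the [7, 7, ...] target above.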