let tf;
if (args.gpu) {
  console.log('Using GPU');
  tf = require('@tensorflow/tfjs-node-gpu');
} else {
console.log('Using CPU');
tf = require('@tensorflow/tfjs-node');
}
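// Both packages expose the same TensorFlow.js API; only the native backend
// differs, so the rest of the script runs unchanged on CPU or GPU.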
if (!fs.existsSync(path.dirname(args.generatorSavePath))) {
fs.mkdirSync(path.dirname(args.generatorSavePath));
}
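// In Node.js, tf.LayersModel.save() accepts a file:// URL naming a
// directory; a model.json file and binary weight files are written into it.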
const saveURL = `file://${args.generatorSavePath}`;
const metadataPath = path.join(args.generatorSavePath, 'acgan-metadata.json');
// Build the discriminator.
const discriminator = buildDiscriminator();
discriminator.compile({
optimizer: tf.train.adam(args.learningRate, args.adamBeta1),
loss: ['binaryCrossentropy', 'sparseCategoricalCrossentropy']
});
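// The two losses pair up, in order, with the discriminator's two outputs
// (see buildDiscriminator below): binaryCrossentropy for the real-vs-fake
// "realness" score and sparseCategoricalCrossentropy for the 10-way
// digit-class ("aux") output.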
discriminator.summary();
// Build the generator.
const generator = buildGenerator(args.latentSize);
generator.summary();
const optimizer = tf.train.adam(args.learningRate, args.adamBeta1);
const combined = buildCombinedModel(
args.latentSize, generator, discriminator, optimizer);
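// In the standard ACGAN setup, the combined model chains the generator into
// the discriminator with the discriminator's weights frozen, so this
// optimizer only updates the generator, pushing it to produce images the
// discriminator scores as real.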
await data.loadData();
let {images: xTrain, labels: yTrain} = data.getTrainData();
yTrain = tf.expandDims(yTrain.argMax(-1), -1);
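// getTrainData() presumably returns one-hot labels of shape
// [numExamples, 10]; argMax(-1) plus expandDims converts them to integer
// class indices of shape [numExamples, 1], the format that
// sparseCategoricalCrossentropy expects.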
// Save the generator model once before starting the training.
await generator.save(saveURL);
let numTensors;
let logWriter;
if (args.logDir) {
console.log(`Logging to tensorboard at logdir: ${args.logDir}`);
logWriter = tf.node.summaryFileWriter(args.logDir);
}
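// The dLoss/gLoss scalars written during training can then be viewed with
// TensorBoard, e.g.: tensorboard --logdir <args.logDir>.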
let step = 0;
for (let epoch = 0; epoch < args.epochs; ++epoch) {
// Write some metadata to disk at the beginning of every epoch.
fs.writeFileSync(
metadataPath,
JSON.stringify(makeMetadata(args.epochs, epoch, false)));
const tBatchBegin = tf.util.now();
const numBatches = Math.ceil(xTrain.shape[0] / args.batchSize);
for (let batch = 0; batch < numBatches; ++batch) {
const actualBatchSize = (batch + 1) * args.batchSize >= xTrain.shape[0] ?
(xTrain.shape[0] - batch * args.batchSize) :
args.batchSize;
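      // Each batch performs two alternating updates: first the discriminator
      // learns to separate real images from generator-made fakes, then the
      // generator (via the combined model) learns to fool the updated
      // discriminator.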
const dLoss = await trainDiscriminatorOneStep(
xTrain, yTrain, batch * args.batchSize, actualBatchSize,
args.latentSize, generator, discriminator);
      // We use 2 * actualBatchSize here so that the generator is optimized
      // over the same number of images as the discriminator.
      const gLoss = await trainCombinedModelOneStep(
          2 * actualBatchSize, args.latentSize, generator, combined);

      console.log(
          `epoch ${epoch + 1}/${args.epochs} batch ${batch + 1}/${
              numBatches}: ` +
          `dLoss = ${dLoss[0].toFixed(6)}, gLoss = ${gLoss[0].toFixed(6)}`);
if (logWriter != null) {
logWriter.scalar('dLoss', dLoss[0], step);
logWriter.scalar('gLoss', gLoss[0], step);
step++;
}
      // Assert that no tensors are leaked between batches.
// TODO(cais): Remove this check once the current memory leak in
// tfjs-node and tfjs-node-gpu is fixed.
if (numTensors == null) {
numTensors = tf.memory().numTensors;
} else {
tf.util.assert(
tf.memory().numTensors === numTensors,
`Leaked ${tf.memory().numTensors - numTensors} tensors`);
}
}
await generator.save(saveURL);
console.log(
`epoch ${epoch + 1} elapsed time: ` +
`${((tf.util.now() - tBatchBegin) / 1e3).toFixed(1)} s`);
console.log(`Saved generator model to: ${saveURL}\n`);
}
// Write metadata to disk to indicate the end of the training.
fs.writeFileSync(
metadataPath,
JSON.stringify(makeMetadata(args.epochs, args.epochs, true)));
}
function buildGenerator(latentSize) {
  const cnn = tf.sequential();
  // ... the generator's earlier dense, reshape, and upsampling
  // conv2dTranspose layers are elided in this excerpt ...
  // Final transposed-convolution layer, producing the 28x28x1 output image.
  cnn.add(tf.layers.conv2dTranspose({
    filters: 1,
    kernelSize: 5,
strides: 2,
padding: 'same',
activation: 'tanh',
kernelInitializer: 'glorotNormal'
}));
// Unlike most TensorFlow.js models, the generator part of an ACGAN has
// two inputs:
// 1. The latent vector that is used as the "seed" of the fake image
// generation.
// 2. A class label that controls which of the ten MNIST digit classes
// the generated fake image is meant to belong to.
// This is the z space commonly referred to in GAN papers.
const latent = tf.input({shape: [latentSize]});
// The desired label of the generated image, an integer in the interval
// [0, NUM_CLASSES).
const imageClass = tf.input({shape: [1]});
// The desired label is converted to a vector of length `latentSize`
// through embedding lookup.
const classEmbedding = tf.layers.embedding({
inputDim: NUM_CLASSES,
outputDim: latentSize,
embeddingsInitializer: 'glorotNormal'
}).apply(imageClass);
// Hadamard product between z-space and a class conditional embedding.
const h = tf.layers.multiply().apply([latent, classEmbedding]);
const fakeImage = cnn.apply(h);
return tf.model({inputs: [latent, imageClass], outputs: fakeImage});
}
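// Illustrative usage of the two-input generator (the latent size and digit
// label below are made-up example values, not part of this script):
//   const generator = buildGenerator(100);
//   const z = tf.randomNormal([1, 100]);         // latent "seed" vector
//   const label = tf.tensor2d([[7]]);            // request a fake digit "7"
//   const fake = generator.predict([z, label]);  // -> shape [1, 28, 28, 1]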
function buildDiscriminator() {
  const cnn = tf.sequential();

  cnn.add(tf.layers.conv2d({
    filters: 32,
    kernelSize: 3,
    padding: 'same',
    strides: 2,
    inputShape: [IMAGE_SIZE, IMAGE_SIZE, 1]
  }));
  cnn.add(tf.layers.leakyReLU({alpha: 0.2}));
  cnn.add(tf.layers.dropout({rate: 0.3}));

  cnn.add(tf.layers.conv2d(
      {filters: 64, kernelSize: 3, padding: 'same', strides: 1}));
  cnn.add(tf.layers.leakyReLU({alpha: 0.2}));
cnn.add(tf.layers.dropout({rate: 0.3}));
cnn.add(tf.layers.conv2d(
{filters: 128, kernelSize: 3, padding: 'same', strides: 2}));
cnn.add(tf.layers.leakyReLU({alpha: 0.2}));
cnn.add(tf.layers.dropout({rate: 0.3}));
cnn.add(tf.layers.conv2d(
{filters: 256, kernelSize: 3, padding: 'same', strides: 1}));
cnn.add(tf.layers.leakyReLU({alpha: 0.2}));
cnn.add(tf.layers.dropout({rate: 0.3}));
cnn.add(tf.layers.flatten());
const image = tf.input({shape: [IMAGE_SIZE, IMAGE_SIZE, 1]});
const features = cnn.apply(image);
// Unlike most TensorFlow.js models, the discriminator has two outputs.
// The 1st output is the probability score assigned by the discriminator to
// how likely it is that the input example is a real MNIST image (as opposed
// to a "fake" one produced by the generator).
const realnessScore =
tf.layers.dense({units: 1, activation: 'sigmoid'}).apply(features);
// The 2nd output is the softmax probabilities assigned by the discriminator
// for the 10 MNIST digit classes (0 through 9). "aux" stands for "auxiliary"
// (the namesake of ACGAN) and refers to the fact that unlike a standard GAN
// (which performs just binary real/fake classification), the discriminator
// part of ACGAN also performs multi-class classification.
const aux = tf.layers.dense({units: NUM_CLASSES, activation: 'softmax'})
                  .apply(features);

  return tf.model({inputs: image, outputs: [realnessScore, aux]});
}
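// Illustrative usage of the two-output discriminator (the batch size of 8 is
// an arbitrary example value):
//   const discriminator = buildDiscriminator();
//   const images = tf.zeros([8, IMAGE_SIZE, IMAGE_SIZE, 1]);
//   const [realness, classProbs] = discriminator.predict(images);
//   // realness: [8, 1] sigmoid scores; classProbs: [8, NUM_CLASSES] softmax.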