def test_modes(self):
    store = LogWriter(self.dir, sync_cycle=1)

    scalars = []
    for i in range(10):
        with store.mode("mode-%d" % i) as writer:
            scalar = writer.scalar("add/scalar0")
            scalars.append(scalar)

    for scalar in scalars[:-1]:
        for i in range(10):
            scalar.add_record(i, float(i))
def setUp(self):
    self.dir = "./tmp/storage_test"
    self.writer = LogWriter(self.dir, sync_cycle=1).as_mode("train")
def _finetune_cls_task(task, data_reader, feed_list, config=None,
                       do_eval=False):
    main_program = task.main_program()
    startup_program = task.startup_program()
    loss = task.variable("loss")
    accuracy = task.variable("accuracy")

    num_epoch = config.num_epoch
    batch_size = config.batch_size
    log_writer = LogWriter(
        os.path.join(config.checkpoint_dir, "vdllog"), sync_cycle=1)

    place, dev_count = hub.common.get_running_device_info(config)
    with fluid.program_guard(main_program, startup_program):
        exe = fluid.Executor(place=place)
        data_feeder = fluid.DataFeeder(feed_list=feed_list, place=place)

        # select strategy
        if isinstance(config.strategy, hub.AdamWeightDecayStrategy):
            scheduled_lr = config.strategy.execute(loss, main_program,
                                                   data_reader, config)
        elif isinstance(config.strategy, hub.DefaultStrategy):
            config.strategy.execute(loss)
        # TODO: add more finetune strategies
        _do_memory_optimization(task, config)
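
# Illustrative sketch only (not the PaddleHub implementation): the excerpt
# above creates the "vdllog" writer but stops before any records are added.
# One plausible pattern is to declare per-step scalars once and feed them
# from the training loop. `log_writer` refers to the writer created inside
# the function above, and `global_step`, `loss_v`, `acc_v` are hypothetical
# values supplied by that loop.
with log_writer.mode("train") as writer:
    train_loss_scalar = writer.scalar("train/loss")
    train_acc_scalar = writer.scalar("train/accuracy")

def record_train_metrics(global_step, loss_v, acc_v):
    # push one step's metrics into the VisualDL log
    train_loss_scalar.add_record(global_step, loss_v)
    train_acc_scalar.add_record(global_step, acc_v)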
#!/usr/bin/env python
import os
import random

import numpy as np
from PIL import Image

from visualdl import ROOT, LogWriter
from visualdl.server.log import logger as log

logdir = './scratch_log'
logw = LogWriter(logdir, sync_cycle=30)

# create scalars in mode train and test.
with logw.mode('train') as logger:
    scalar0 = logger.scalar("scratch/scalar")

with logw.mode('test') as logger:
    scalar1 = logger.scalar("scratch/scalar")

# add scalar records.
last_record0 = 0.
last_record1 = 0.
for step in range(1, 100):
    last_record0 += 0.1 * (random.random() - 0.3)
    last_record1 += 0.1 * (random.random() - 0.7)
    scalar0.add_record(step, last_record0)
    scalar1.add_record(step, last_record1)
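
# Once the records are flushed, the log directory can be inspected. A minimal
# read-back sketch, assuming the VisualDL 1.x LogReader API (mode / scalar /
# ids / records); the web board itself is started separately, e.g.:
#   visualdl --logdir ./scratch_log --port 8080
from visualdl import LogReader

reader = LogReader(logdir)
with reader.mode('train') as train_reader:
    train_scalar = train_reader.scalar("scratch/scalar")
    print(train_scalar.ids())      # recorded steps
    print(train_scalar.records())  # recorded values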
# =======================================================================
from __future__ import print_function
import numpy as np
from visualdl import LogWriter
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.v2.fluid.framework as framework
from paddle.v2.fluid.initializer import NormalInitializer
from paddle.v2.fluid.param_attr import ParamAttr
# create VisualDL logger and directory
logdir = "./tmp"
logwriter = LogWriter(logdir, sync_cycle=10)
# create 'train' run
with logwriter.mode("train") as writer:
# create 'loss' scalar tag to keep track of loss function
loss_scalar = writer.scalar("loss")
with logwriter.mode("train") as writer:
acc_scalar = writer.scalar("acc")
num_samples = 4
with logwriter.mode("train") as writer:
conv_image = writer.image("conv_image", num_samples,
1) # show 4 samples for every 1 step
input_image = writer.image("input_image", num_samples, 1)
with logwriter.mode("train") as writer:
import mobilenet_v2
import paddle
import paddle.dataset.cifar as cifar
import paddle.fluid as fluid
from visualdl import LogWriter

# create the log writer
log_writer = LogWriter(dir='log/', sync_cycle=10)

# create the train and test recording tools
with log_writer.mode('train') as writer:
    train_cost_writer = writer.scalar('cost')
    train_acc_writer = writer.scalar('accuracy')
    histogram = writer.histogram('histogram', num_buckets=50)

with log_writer.mode('test') as writer:
    test_cost_writer = writer.scalar('cost')
    test_acc_writer = writer.scalar('accuracy')

# define the input layers
image = fluid.layers.data(name='image', shape=[3, 32, 32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')

# get the classifier
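
# The excerpt is cut off before the classifier and training loop. As a rough
# illustration (not the original tutorial code), the writers defined above
# could record per-batch metrics like this, where `step`, `cost_v`, `acc_v`
# and `param_values` are hypothetical values produced by the loop.
def record_train_step(step, cost_v, acc_v, param_values):
    train_cost_writer.add_record(step, cost_v)
    train_acc_writer.add_record(step, acc_v)
    # the histogram component takes a flat list of values per step
    histogram.add_record(step, list(param_values))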
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(
    loss=keras.losses.categorical_crossentropy,
    optimizer=keras.optimizers.Adadelta(),
    metrics=['accuracy'])
# create VisualDL logger
logdir = "/workspace"
logger = LogWriter(logdir, sync_cycle=100)
# mark the components with 'train' label.
with logger.mode("train"):
    # create a scalar component under the 'scalars/' namespace
    scalar_keras_train_loss = logger.scalar(
        "scalars/scalar_keras_mnist_train_loss")
    image_input = logger.image("images/input", 1)
    image0 = logger.image("images/image0", 1)
    image1 = logger.image("images/image1", 1)
    histogram0 = logger.histogram("histogram/histogram0", num_buckets=50)
    histogram1 = logger.histogram("histogram/histogram1", num_buckets=50)

train_step = 0
class LossHistory(keras.callbacks.Callback):
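    # The original callback body is cut off in this excerpt. A minimal sketch
    # (not the original demo code): push the per-batch loss into the scalar
    # component created above, using the standard Keras on_batch_end hook.
    def on_batch_end(self, batch, logs=None):
        global train_step
        logs = logs or {}
        # Keras reports the batch loss under the 'loss' key
        scalar_keras_train_loss.add_record(train_step,
                                           float(logs.get('loss', 0.0)))
        train_step += 1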
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

# function to show an image
def imshow(img):
    img = img / 2 + 0.5  # unnormalize
    npimg = img.numpy()
    fig, ax = plt.subplots()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    # we can either show the image or save it locally
    # plt.show()
    fig.savefig('out' + str(np.random.randint(0, 10000)) + '.pdf')
logdir = "./workspace"
logger = LogWriter(logdir, sync_cycle=100)
# mark the components with 'train' label.
with logger.mode("train"):
    # create a scalar component under the 'scalars/' namespace
    scalar_pytorch_train_loss = logger.scalar(
        "scalars/scalar_pytorch_train_loss")
    image1 = logger.image("images/image1", 1)
    image2 = logger.image("images/image2", 1)
    histogram0 = logger.histogram("histogram/histogram0", num_buckets=100)
# get some random training images
dataiter = iter(trainloader)
images, labels = next(dataiter)
# show images
imshow(torchvision.utils.make_grid(images))
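
# A rough sketch of the training loop that feeds the loss scalar declared
# above (not the original demo loop): `net`, `criterion` and `optimizer`
# are the usual PyTorch training objects and are assumed to exist already.
step = 0
for epoch in range(2):
    for inputs, targets in trainloader:
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        # push the per-iteration loss into VisualDL
        scalar_pytorch_train_loss.add_record(step, float(loss.item()))
        step += 1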
def _finetune_seq_label_task(task,
                             data_reader,
                             feed_list,
                             config=None,
                             do_eval=False):
    """
    Finetune a sequence labeling task; the evaluation metrics are F1,
    precision and recall.
    """
    main_program = task.main_program()
    startup_program = task.startup_program()
    loss = task.variable("loss")
    seq_len = task.variable("seq_len")

    num_epoch = config.num_epoch
    batch_size = config.batch_size
    log_writer = LogWriter(
        os.path.join(config.checkpoint_dir, "vdllog"), sync_cycle=1)

    place, dev_count = hub.common.get_running_device_info(config)
    with fluid.program_guard(main_program, startup_program):
        exe = fluid.Executor(place=place)
        data_feeder = fluid.DataFeeder(feed_list=feed_list, place=place)

        # Select strategy
        if isinstance(config.strategy, hub.AdamWeightDecayStrategy):
            scheduled_lr = config.strategy.execute(loss, main_program,
                                                   data_reader, config)
        elif isinstance(config.strategy, hub.DefaultStrategy):
            config.strategy.execute(loss)
        # TODO: add more finetune strategies
        _do_memory_optimization(task, config)
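
# Illustrative sketch, not part of PaddleHub: the docstring above names F1,
# precision and recall as the evaluation metrics, so the same vdllog writer
# created inside the function could track them under an "evaluate" run.
# `global_step`, `f1`, `precision` and `recall` are hypothetical values
# supplied by the evaluation loop.
with log_writer.mode("evaluate") as writer:
    eval_f1_scalar = writer.scalar("f1")
    eval_precision_scalar = writer.scalar("precision")
    eval_recall_scalar = writer.scalar("recall")

def record_eval_metrics(global_step, f1, precision, recall):
    eval_f1_scalar.add_record(global_step, f1)
    eval_precision_scalar.add_record(global_step, precision)
    eval_recall_scalar.add_record(global_step, recall)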