# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_tfflags(wandb_init_run):
    """Check that absl/TF flags are picked up by ``wandb.config.update``."""
    tf_flags = flags.FLAGS
    # Register a float flag, then hand the whole FLAGS object to wandb.
    flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
    wandb.config.update(tf_flags)
    assert wandb_init_run.config['learning_rate'] == 0.01
# NOTE(review): fragment — the opening `if` of this elif-chain is outside the
# visible region, and indentation appears flattened by extraction; verify
# against the original file before editing logic.
elif arguments.get('--hypers-override-file') is not None:
# Merge JSON-file overrides on top of the existing hyperparameters dict.
with open(arguments.get('--hypers-override-file')) as f:
hyperparameters.update(json.load(f))
os.makedirs(save_folder, exist_ok=True)
# Optionally stamp the run with the current git commit for reproducibility.
if tag_in_vcs:
hyperparameters['git_commit'] = git_tag_run(run_name)
# turns off wandb if you don't want to log anything
if arguments.get('--dryrun'):
os.environ["WANDB_MODE"] = 'dryrun'
# save hyperparams to logging
# must filter out type=set from logging when as that is not json serializable
wandb.init(name=run_name, config={k: v for k, v in hyperparameters.items() if not isinstance(v, set)})
# Record run metadata (paths, CLI invocation, GPU visibility) alongside the
# hyperparameters so the run is fully reconstructible from the W&B dashboard.
wandb.config.update({'model-class': arguments['--model'],
'train_folder': str(train_data_dirs),
'valid_folder': str(valid_data_dirs),
'save_folder': str(save_folder),
'test_folder': str(test_data_dirs),
'CUDA_VISIBLE_DEVICES': os.environ.get("CUDA_VISIBLE_DEVICES", 'Not Set'),
'run-name': arguments.get('--run-name'),
'CLI-command': ' '.join(sys.argv)})
# Either evaluate an existing model checkpoint or train a new one.
if arguments.get('--evaluate-model'):
model_path = RichPath.create(arguments['--evaluate-model'])
else:
model_path = run_train(model_class, train_data_dirs, valid_data_dirs, save_folder, hyperparameters,
azure_info_path, run_name, arguments['--quiet'],
max_files_per_dir=max_files_per_dir,
parallelize=not(arguments['--sequential']))
# NOTE(review): truncated fragment — the `while not done:` body is cut off at
# the end of this snippet; do not assume anything about the missing steps.
def train(self):
"""Train the agent."""
# logger
if self.args.log:
wandb.init()
# Log hyperparameters so the run is reproducible from the dashboard.
wandb.config.update(self.hyper_params)
# wandb.watch([self.actor, self.critic], log="parameters")
# pre-training if needed
self.pretrain()
# One iteration per episode; per-episode bookkeeping is reset below.
for self.i_episode in range(1, self.args.episode_num + 1):
state = self.env.reset()
done = False
score = 0
self.episode_step = 0
losses = list()
t_begin = time.time()
while not done:
if self.args.render and self.i_episode >= self.args.render_after:
# NOTE(review): truncated fragment — the `while not done:` body is cut off at
# the end of this snippet; the remainder of the episode loop is not visible.
def train(self):
"""Train the agent."""
# logger
if self.args.log:
wandb.init()
# Log hyperparameters so the run is reproducible from the dashboard.
wandb.config.update(self.hyper_params)
# wandb.watch([self.actor, self.vf, self.qf_1, self.qf_2], log="parameters")
# pre-training if needed
self.pretrain()
# One iteration per episode; per-episode state is reset below.
for self.i_episode in range(1, self.args.episode_num + 1):
state = self.env.reset()
done = False
score = 0
self.episode_step = 0
loss_episode = list()
t_begin = time.time()
while not done:
if self.args.render and self.i_episode >= self.args.render_after:
# NOTE(review): truncated fragment — the episode loop continues past the last
# visible line (`action = self.select_action(state)`).
def train(self):
"""Train the agent."""
# logger
if self.args.log:
wandb.init()
# Log hyperparameters so the run is reproducible from the dashboard.
wandb.config.update(self.hyper_params)
# wandb.watch([self.actor, self.critic1, self.critic2], log="parameters")
# Unlike the sibling train() variants above, no pretrain() call here.
for self.i_episode in range(1, self.args.episode_num + 1):
state = self.env.reset()
done = False
score = 0
loss_episode = list()
self.episode_step = 0
t_begin = time.time()
while not done:
# Optionally render once enough episodes have elapsed.
if self.args.render and self.i_episode >= self.args.render_after:
self.env.render()
action = self.select_action(state)
# NOTE(review): truncated fragment — the loop body continues past the last
# visible line (`state = next_state`).
def train(self):
"""Train the agent."""
# logger
if self.args.log:
wandb.init()
wandb.config.update(self.hyper_params)
# Gradient/parameter watching enabled here (commented out in sibling variants).
wandb.watch([self.actor, self.critic], log="parameters")
# Uses a local `i_episode` rather than `self.i_episode` like the variants above.
for i_episode in range(1, self.args.episode_num + 1):
state = self.env.reset()
done = False
score = 0
loss_episode = list()
while not done:
if self.args.render and i_episode >= self.args.render_after:
self.env.render()
# Standard agent-environment interaction step.
action = self.select_action(state)
next_state, reward, done = self.step(action)
state = next_state
# NOTE(review): `config` is read here but redefined below — this looks like two
# separate snippets fused by extraction; verify against the original source.
lr = config["lr"]
size = 300
wd = 1e-2
bs = 8 # reduce this if you are running out of GPU memory
pretrained = True
# Collect the run's hyperparameters for logging.
config = {
'epochs' : epochs,
'lr' : lr,
'size' : size,
'wd' : wd,
'bs' : bs,
'pretrained' : pretrained,
}
wandb.config.update(config)
# Weighted precision/recall/F1 on class index 1 (presumably the positive
# class — confirm against the dataset labels).
metrics = [
Precision(average='weighted', clas_idx=1),
Recall(average='weighted', clas_idx=1),
FBeta(average='weighted', beta=1, clas_idx=1),
]
data = datasets_fastai.load_dataset(dataset, size, bs)
encoder_model = models.resnet18
learn = unet_learner(data, encoder_model, path='models', metrics=metrics, wd=wd, bottle=True, pretrained=pretrained)
# NOTE(review): truncated — the callbacks list is not closed in the visible region.
callbacks = [
WandbCallback(learn, log=None, input_type="images"),
MyCSVLogger(learn, filename='baseline_model'),
ExportCallback(learn, "baseline_model", monitor='f_beta'),
MySaveModelCallback(learn, every='epoch', monitor='f_beta')
def on_train_begin(self, logs):
    """Record the training start time and push run metadata to W&B.

    Stores a wall-clock timestamp (used later for duration reporting) and
    the model's metric names, then logs parameter/environment/agent state
    into ``wandb.config``.
    """
    self.train_start = timeit.default_timer()
    self.metrics_names = self.model.metrics_names
    # Snapshot the run configuration: training params, the (wrapped)
    # environment's state, its spec, and the agent's internals.
    run_info = {
        'params': self.params,
        'env': self.env.__dict__,
        'env.env': self.env.env.__dict__,
        'env.env.spec': self.env.env.spec.__dict__,
        'agent': self.model.__dict__,
    }
    wandb.config.update(run_info)