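# Code fragments from the wandb client library.
#
# Fragment from the TensorBoard summary conversion: warnings for histograms that
# cannot be rendered, and mirroring of hparams plugin data into the run config.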
wandb.termwarn("Not logging key \"{}\". Histograms must have fewer than {} bins".format(
tag, wandb.Histogram.MAX_LENGTH), repeat=False)
else:
#TODO: is there a case where we can render this?
wandb.termwarn("Not logging key \"{}\". Found a histogram with only 2 bins.".format(tag), repeat=False)
        elif value.tag == "_hparams_/session_start_info":
            if wandb.util.get_module("tensorboard.plugins.hparams"):
                from tensorboard.plugins.hparams import plugin_data_pb2
                plugin_data = plugin_data_pb2.HParamsPluginData()
                plugin_data.ParseFromString(
                    value.metadata.plugin_data.content)
                for key, param in six.iteritems(plugin_data.session_start_info.hparams):
                    if not wandb.run.config.get(key):
                        wandb.run.config[key] = param.number_value or param.string_value or param.bool_value
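                        # Note: `or` picks the first truthy field here, so falsy values
                        # like 0 or an empty string are passed over in favor of later fields.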
            else:
                wandb.termerror(
                    "Received hparams tf.summary, but could not import the hparams plugin from tensorboard")
    return values
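
            # Fragment from the patched TensorBoard add_event hook: derive a namespace
            # from the event writer's file path so values from nested tfevents
            # directories are nested under matching keys in wandb, and optionally save
            # the event files themselves.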
            # changes.
            log_dir = os.path.dirname(os.path.commonprefix(list(writers)))
            filename = os.path.basename(name)
            # Tensorboard loads all tfevents files in a directory and prepends
            # their values with the path. Passing namespace to log allows us
            # to nest the values in wandb
            namespace = name.replace(filename, "").replace(
                log_dir, "").strip(os.sep)
            if save:
                wandb.save(name, base_path=log_dir)
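                # Also save any .pbtxt files (likely graph/config protos) that were
                # written after the run started.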
                for path in glob.glob(os.path.join(log_dir, "*.pbtxt")):
                    if os.stat(path).st_mtime >= wandb.START_TIME:
                        wandb.save(path, base_path=log_dir)
            log(event, namespace=namespace, step=event.step)
        except Exception as e:
            wandb.termerror("Unable to log event %s" % e)
    return _add_event
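
            # Fragment from the run manager: compute a namespace for a newly seen
            # TensorBoard logdir, start a Watcher for it, and lazily start a single
            # Consumer thread shared by all watchers.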
            # to nest the values in wandb
            namespace = logdir.replace(filename, "").replace(
                rootdir, "").strip(os.sep)
            # TODO: revisit this heuristic, it exists because we don't know the
            # root log directory until more than one tfevents file is written to
            if len(dirs) == 1 and namespace not in ["train", "validation"]:
                namespace = None
            with self._tensorboard_lock:
                self._tensorboard_watchers.append(
                    Watcher(logdir, self._watcher_queue, namespace=namespace, save=save))
                if self._tensorboard_consumer is None:
                    self._tensorboard_consumer = Consumer(self._watcher_queue)
                    self._tensorboard_consumer.start()
                self._tensorboard_watchers[-1].start()
            return self._tensorboard_watchers
        except ImportError as e:
            wandb.termerror("Couldn't import tensorboard, not streaming events. Run `pip install tensorboard`")
            pass
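
    # Fragment from sweep agent setup: sweep ids may be passed as "sweep",
    # "project/sweep", or "entity/project/sweep"; components embedded in the id
    # override the corresponding command line parameters.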
    elif len(sweep_split) == 2:
        split_project, sweep_id = sweep_split
        if project and split_project:
            wandb.termwarn('Ignoring project commandline parameter')
        project = split_project or project
    elif len(sweep_split) == 3:
        split_entity, split_project, sweep_id = sweep_split
        if entity and split_entity:
            wandb.termwarn('Ignoring entity commandline parameter')
        if project and split_project:
            wandb.termwarn('Ignoring project commandline parameter')
        project = split_project or project
        entity = split_entity or entity
    else:
        wandb.termerror('Expected sweep_id in form of sweep, project/sweep, or entity/project/sweep')
        return
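    # Illustrative example: "my-team/my-project/abc123" yields entity="my-team",
    # project="my-project", sweep_id="abc123".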
    if entity:
        env.set_entity(entity)
    if project:
        env.set_project(project)
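    # Console logging is verbose (DEBUG) by default, but limited to errors in Jupyter.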
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    log_level = logging.DEBUG
    if in_jupyter:
        log_level = logging.ERROR
    ch.setLevel(log_level)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    try:
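
        # Fragment from the file upload worker: track total/uploaded/failed state for
        # this file and push it through the internal API, marking the entry as failed
        # if the upload raises.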
        self._progress[self.label] = {
            'total': size,
            'uploaded': 0,
            'failed': False
        }
        try:
            with open(self.save_path, 'rb') as f:
                self._api.push(
                    {self.save_name: f},
                    progress=lambda _, t: self.progress(t))
        except Exception as e:
            self._progress[self.label]['uploaded'] = 0
            self._progress[self.label]['failed'] = True
            wandb.util.sentry_exc(e)
            wandb.termerror('Error uploading "{}": {}, {}'.format(
                self.save_name, type(e).__name__, e))
def run_controller(sweep_id=None, verbose=False):
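    """Run the controller process for an existing sweep.

    Sets up console logging, creates an InternalApi client, builds a Sweep for
    ``sweep_id``, and drives it with ``sweep.run()``. Errors while constructing
    the sweep are reported and abort the controller.
    """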
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    api = InternalApi()
    try:
        sweep = Sweep(api, sweep_id=sweep_id, verbose=verbose)
    except SweepError as err:
        wandb.termerror('Controller Error: %s' % err)
        return
    sweep.run()
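
# Hypothetical usage (assuming a sweep id from an already-created sweep, e.g.
# one printed by `wandb sweep`):
#     run_controller(sweep_id="abc123", verbose=True)

    # Fragment from the headless run setup: resolve the working directory, record
    # the program and environment on the run, then hand off to a RunManager.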
    if not root:
        root = os.path.abspath(os.getcwd())
    host = socket.gethostname()
    remote_url = 'file://%s%s' % (host, root)
    run.save(program=args['program'], api=api)
    env = dict(os.environ)
    run.set_environment(env)
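    # Errors raised while constructing the RunManager are reported with their full
    # traceback; otherwise the user's program is launched under the manager.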
    try:
        rm = wandb.run_manager.RunManager(api, run)
    except wandb.run_manager.Error:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        wandb.termerror('An Exception was raised during setup, see %s for full traceback.' %
                        util.get_log_file_path())
        wandb.termerror(str(exc_value))
        if 'permission' in str(exc_value):
            wandb.termerror(
                'Are you sure you provided the correct API key to "wandb login"?')
        lines = traceback.format_exception(
            exc_type, exc_value, exc_traceback)
        logging.error('\n'.join(lines))
    else:
        rm.run_user_process(args['program'], args['args'], env)