elif isinstance(level, tables.VLArray):
    # A variable-length array of shape (1,) holds a single pickled
    # object; anything else is reported as an array of unknown dtype
    if level.shape == (1,):
        return ObjectNode()
    node = NumpyArrayNode(level.shape, 'unknown')
    return node
elif isinstance(level, tables.Array):
    # Optionally compute summary statistics for the array
    stats = {}
    if settings.get('summarize'):
        stats['mean'] = level[:].mean()
        stats['std'] = level[:].std()

    # Optionally report the compression filters in effect
    compression = {}
    if settings.get('compression'):
        compression['complib'] = level.filters.complib
        compression['shuffle'] = level.filters.shuffle
        compression['complevel'] = level.filters.complevel

    node = NumpyArrayNode(level.shape, _format_dtype(level.dtype),
                          statistics=stats, compression=compression)

    if hasattr(level._v_attrs, 'zeroarray_dtype'):
        # Zero-size arrays are saved as their shape; the true dtype is
        # kept in an attribute
        dtype = level._v_attrs.zeroarray_dtype
        node = NumpyArrayNode(tuple(level), _format_dtype(dtype))
    elif hasattr(level._v_attrs, 'strtype'):
        # String data is stored as raw bytes; the logical shape divides
        # out the item size and the 4-byte-per-character unicode width
        strtype = level._v_attrs.strtype
        itemsize = level._v_attrs.itemsize
        if strtype == b'unicode':
            shape = level.shape[:-1] + (level.shape[-1] // itemsize // 4,)
        elif strtype == b'ascii':
            shape = level.shape
        node = NumpyArrayNode(shape, strtype.decode('ascii'))
    return node
elif isinstance(level, tables.link.SoftLink):
    node = SoftLinkNode(level.target)
    return node
else:
    return Node()
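# --- Added usage sketch (not from the original module) ---
# Saving a nested dict with deepdish produces the node types handled
# above (Groups, Arrays, pickled objects), which can then be walked with
# PyTables just as _tree_level does. File name and data are illustrative.
import deepdish as dd
import numpy as np
import tables

dd.io.save('example.h5', {
    'x': np.zeros((10, 3)),      # stored as a tables.Array
    'sub': {'y': np.arange(5)},  # nested dict becomes a tables.Group
})

with tables.open_file('example.h5') as h5:
    for node in h5.walk_nodes('/'):
        print(node._v_pathname, type(node).__name__)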
if args.seed is None:
    # Recover the seed from the model filename (convention: '_s<seed>_')
    pattern = re.compile(r'_s(\d+)_')
    m = pattern.search(os.path.basename(args.caffemodel))
    if m:
        seed = int(m.group(1))
    else:
        raise ValueError('Could not automatically determine seed')
else:
    seed = args.seed
print('Seed:', seed)

# The net has a single output blob; list() is needed on Python 3, where
# dict.values() returns a view that cannot be indexed directly
scores = list(net.forward_all(data=x).values())[0].squeeze(axis=(2, 3))
yhat = scores.argmax(-1)
if args.output:
    dd.io.save(args.output, dict(scores=scores, labels=y, name=name, seed=seed))

success = (yhat == y).mean()
error = 1 - success
print('Success: {:.2f}% / Error: {:.2f}%'.format(success * 100, error * 100))
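# --- Added illustration (hypothetical filename) ---
# The seed-recovery convention assumed above: the seed is embedded in the
# model filename as '_s<digits>_'.
import os
import re

fn = 'models/cifar10_s1234_iter_70000.caffemodel'
m = re.search(r'_s(\d+)_', os.path.basename(fn))
assert m is not None and int(m.group(1)) == 1234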
welcome = "Loaded {} into '{}':".format(
path_desc,
paint('data', 'blue', colorize=colorize))
# Import deepdish for the session
import deepdish as dd
import IPython
IPython.embed(header=welcome)
i = 0
if args.inspect is not None:
    fn = single_file(args.file)
    try:
        data = io.load(fn, args.inspect)
    except ValueError:
        s = 'Error: Could not find group: {}'.format(args.inspect)
        print(paint(s, 'red', colorize=colorize))
        sys.exit(1)
    if args.ipython:
        run_ipython(fn, group=args.inspect, data=data)
    else:
        print(data)
elif args.ipython:
    fn = single_file(args.file)
    data = io.load(fn)
    run_ipython(fn, data=data)
else:
    for f in args.file:
        # State that will be incremented
        settings['filtered_count'] = 0
        if args.column_width is None:
            settings['left-column-width'] = max(
                MIN_AUTOMATIC_COLUMN_WIDTH,
                min(MAX_AUTOMATIC_COLUMN_WIDTH, _discover_column_width(f)))
        else:
            settings['left-column-width'] = args.column_width

        s = get_tree(f, raw=args.raw, settings=settings)
        if s is not None:
            if i > 0:
                print()
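# --- Added illustration ---
# The clamp used above, in isolation; the bound values here are made up,
# not deepdish's actual constants.
MIN_AUTOMATIC_COLUMN_WIDTH = 25
MAX_AUTOMATIC_COLUMN_WIDTH = 60

def clamp_width(width):
    # max(lo, min(hi, x)) keeps x inside [lo, hi]
    return max(MIN_AUTOMATIC_COLUMN_WIDTH,
               min(MAX_AUTOMATIC_COLUMN_WIDTH, width))

assert clamp_width(10) == 25
assert clamp_width(40) == 40
assert clamp_width(100) == 60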
info = {}
if 0:
    # Just load a pre-calculated version instead of retraining
    model_fn = base + '.caffemodel'
    net = caffe.Classifier(bare_conf_fn, model_fn, image_dims=(32, 32))
    net.set_phase_test()
    net.set_mode_gpu()
    net.set_device(DEVICE_ID)
    all_fmj = dd.io.load('all_fmj0_eps_inf.h5')
    all_te_fmj = dd.io.load('all_te_fmj0_eps_inf.h5')
else:
    # Optionally warm-start from a previous solver state
    #warmstart_fn = base + '.solverstate'
    #warmstart_fn = 'models/regression100_6916_loop0_iter_70000.solverstate'
    #warmstart_fn = 'models/adaboost100_35934_loop0_iter_70000.solverstate'
    warmstart_fn = None
    net, info = train_model(name, solver_conf_fn, conf_fn, bare_conf_fn,
                            steps, seed=g_seed, logfile=logfile,
                            device_id=DEVICE_ID, warmstart=warmstart_fn)
    # Raw train/test scores; list() is needed for Python 3 dict views
    all_fmj = list(net.forward_all(data=X).values())[0].squeeze(axis=(2, 3))
    all_te_fmj = list(net.forward_all(data=te_X).values())[0].squeeze(axis=(2, 3))
    all_fmj *= zstd
    all_te_fmj *= zstd
g_seed += 1
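# --- Added sketch ---
# The counterpart of the 'load a pre-calculated version' branch above:
# after training, the activations could be cached under the same file
# names the load branch expects (an assumption based on those names).
dd.io.save('all_fmj0_eps_inf.h5', all_fmj)
dd.io.save('all_te_fmj0_eps_inf.h5', all_te_fmj)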
# store results in this directory
name = '_'.join([mdl.name, cellname, expt, stim,
                 datetime.now().strftime('%Y.%m.%d-%H.%M')])
base = f'../results/{name}'
os.makedirs(base, exist_ok=True)

# define model callbacks
cbs = [cb.ModelCheckpoint(os.path.join(base, 'weights-{epoch:03d}-{val_loss:.3f}.h5')),
       cb.TensorBoard(log_dir=base, histogram_freq=1, batch_size=5000, write_grads=True),
       cb.ReduceLROnPlateau(min_lr=0, factor=0.2, patience=10),
       cb.CSVLogger(os.path.join(base, 'training.csv')),
       cb.EarlyStopping(monitor='val_loss', patience=20)]

# train
history = mdl.fit(x=data.X, y=data.y, batch_size=bz, epochs=nb_epochs,
                  callbacks=cbs, validation_split=val_split, shuffle=True)
dd.io.save(os.path.join(base, 'history.h5'), history.history)
return history
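# --- Added sketch ---
# Reading the metrics back with deepdish; 'base' is the results directory
# created above, and 'val_loss' exists because fit() ran with a
# validation split.
import os
import deepdish as dd

history = dd.io.load(os.path.join(base, 'history.h5'))
print('best val_loss: {:.3f}'.format(min(history['val_loss'])))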