info = {}
if 0:
    # Just load a pre-calculated version instead
    model_fn = base + '.caffemodel'
    net = caffe.Classifier(bare_conf_fn, model_fn, image_dims=(32, 32))
    net.set_phase_test()
    net.set_mode_gpu()
    net.set_device(DEVICE_ID)
    all_fmj = dd.io.load('all_fmj0_eps_inf.h5')
    all_te_fmj = dd.io.load('all_te_fmj0_eps_inf.h5')
else:
    #warmstart_fn = base + '.solverstate'
    #warmstart_fn = 'models/regression100_6916_loop0_iter_70000.solverstate'
    #warmstart_fn = 'models/adaboost100_35934_loop0_iter_70000.solverstate'
    warmstart_fn = None
    net, info = train_model(name, solver_conf_fn, conf_fn, bare_conf_fn, steps,
                            seed=g_seed, logfile=logfile, device_id=DEVICE_ID,
                            warmstart=warmstart_fn)
    all_fmj = net.forward_all(data=X).values()[0].squeeze(axis=(2, 3))
    all_te_fmj = net.forward_all(data=te_X).values()[0].squeeze(axis=(2, 3))
    all_fmj *= zstd
    all_te_fmj *= zstd

g_seed += 1
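# A minimal sketch (not from the original script) of how the cached
# activations read by the `if 0:` branch above could be produced: deepdish
# round-trips plain NumPy arrays through HDF5.
import numpy as np
import deepdish as dd

all_fmj = np.random.randn(10, 100)           # stand-in for network outputs
dd.io.save('all_fmj0_eps_inf.h5', all_fmj)   # cache to HDF5
cached = dd.io.load('all_fmj0_eps_inf.h5')   # later runs can skip the forward pass
assert np.allclose(all_fmj, cached)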
    Parameters
    ----------
    path : str
        Path to an HDF5 file.

    Examples
    --------
    This is an abstract data type, but let us say that ``Foo`` inherits
    from ``Saveable``. To construct an object of this class from a file, we
    do:

    >>> foo = Foo.load('foo.h5')  # doctest: +SKIP
    """
    if path is None:
        return cls.load_from_dict({})
    else:
        d = io.load(path)
        return cls.load_from_dict(d)
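# A minimal sketch of a concrete subclass; `Foo` and its single field are
# hypothetical, assuming the usual Saveable contract of paired
# save_to_dict / load_from_dict methods.
class Foo(Saveable):
    def __init__(self, x):
        self.x = x

    @classmethod
    def load_from_dict(cls, d):
        # Rebuild the object from the dict loaded out of the HDF5 file
        return cls(d.get('x'))

    def save_to_dict(self):
        # Counterpart used when saving
        return {'x': self.x}

# foo = Foo.load('foo.h5')  # round-trips through io.load + load_from_dict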
        rs = []
        #for X in data['responses'][l]:
        X = data['responses'][l]
        y.append((X**2).mean())
    plt.plot(np.arange(len(y)), y, label='{}'.format(name))
plt.xticks(np.arange(len(y)), layers)
plt.ylabel('Second moment')
plt.ylim((0, None))
plt.legend(loc=4)
plt.savefig(vz.impath('svg'))
plt.close()
plt.figure()
for fn in args.responses:
    data = dd.io.load(fn)
    name = data['name']
    if layers is None:
        layers = data['layers']
    y = []
    ystd = []
    for l in layers:
        rs = []
        #for X in data['responses'][l]:
        X = data['responses'][l]
        y.append(X.std())
    plt.plot(np.arange(len(y)), y, label='{}'.format(name))
plt.xticks(np.arange(len(y)), layers)
plt.ylabel('Standard deviation')
plt.ylim((0, None))
plt.legend(loc=4)
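# The loops above assume each responses file stores 'name', 'layers', and a
# per-layer 2-D array under 'responses'. A minimal sketch of writing such a
# file (layer names and shapes are made up, not from the original code):
import numpy as np
import deepdish as dd

dd.io.save('responses_example.h5', {
    'name': 'examplenet',
    'layers': ['conv1', 'conv2'],
    'responses': {
        'conv1': np.random.randn(500, 32),  # (samples, units)
        'conv2': np.random.randn(500, 64),
    },
})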
parser.add_argument('scores', nargs='+', type=str)
parser.add_argument('-o', '--output', default='scores.h5', type=str)
parser.add_argument('-n', '--name', type=str)
args = parser.parse_args()

all_scores = None
scores = OrderedDict()
labels = None
#scores = []
for s in args.scores:
    data = dd.io.load(s)
    if args.name:
        name = args.name
    else:
        name = str(data['name'])

    if name not in scores:
        scores[name] = dict(scores=None, seeds=None)

    #scores[name]['names'] = np.concatenate([scores[name]['names'], [data['name']]])
    scores[name]['seeds'] = concat(scores[name]['seeds'], data['seed'])
    scores[name]['scores'] = concat(scores[name]['scores'], data['scores'])

    if labels is None:
        labels = data['labels']
    else:
        np.testing.assert_array_equal(labels, data['labels'])
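# `concat` is not defined in this snippet; a minimal version consistent with
# how it is called above (accumulating arrays into a possibly-None slot)
# might look like this:
import numpy as np

def concat(acc, new):
    new = np.atleast_1d(new)
    if acc is None:
        return new
    return np.concatenate([acc, new])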
def load_model(filepath):
    """
    Function for loading pymer4 models. A file path ending in .h5 or .hdf5
    should be provided. For Lmer models an additional file with the same
    name but ending in .robj should be located in the same directory.

    Args:
        filepath (str): full filepath string ending with .h5 or .hdf5
    """
    if filepath.endswith(".h5") or filepath.endswith(".hdf5"):
        if not os.path.exists(filepath):
            raise IOError("File not found!")
        # Load h5 first
        model_atts = dd.io.load(filepath)
        # Figure out what kind of model we're dealing with
        if model_atts['simple_atts']['model_class'] == 'Lmer':
            model = Lmer('', [])
        elif model_atts['simple_atts']['model_class'] == 'Lm2':
            model = Lm2('', [], '')
        elif model_atts['simple_atts']['model_class'] == 'Lm':
            model = Lm('', [])
        # Set top-level attributes
        for k, v in model_atts['simple_atts'].items():
            if k != 'model_class':
                setattr(model, k, v)
        # Make sure the model formula is a plain Python string so that rpy2
        # doesn't complain
        model.formula = str(model.formula)
        # Set data attributes
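# Hypothetical usage of the loader above ('mymodel.h5' is illustrative).
# Per the docstring, Lmer models also need the companion .robj file sitting
# next to the HDF5 file so the fitted R object can be restored.
# model = load_model('mymodel.h5')
# print(model.formula)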
# NOTE: Python 2 code (print statements; dict.keys() returns a shuffleable list)
def loadUserData(self, tmp_userfeats_path):
    for i, userfeat_path in enumerate(tmp_userfeats_path):
        print 'loading user feature: %s' % userfeat_path
        userdict0 = dd.io.load(userfeat_path)
        self.userfeats_dict = dict(self.userfeats_dict, **userdict0)
    print 'finished loading'
    userdict0 = None
    self.userphotoids = self.userfeats_dict.keys()
    random.shuffle(self.userphotoids)
    self.n_batch = int(np.ceil(len(self.userphotoids) / train_batch_size))
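# The method above merges per-file dicts keyed by photo id; a minimal sketch
# of writing one such user-feature file (key names and feature size are
# assumptions, not from the original code):
import numpy as np
import deepdish as dd

userdict = {
    'photo_0001': np.random.randn(256),
    'photo_0002': np.random.randn(256),
}
dd.io.save('userfeats_part0.h5', userdict)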
def load_or_run_results(
    re_run=False, fname=None,
    src_type='VMD',
    sigma_halfspace=0.01
):
    if re_run:
        run_simulation(
            fname=fname, sigma_halfspace=sigma_halfspace,
            src_type=src_type
        )
    else:
        downloads, directory = download_and_unzip_data()
        fname = os.path.sep.join([directory, fname])

    simulation_results = dd.io.load(fname)
    mesh = Mesh.TensorMesh(simulation_results['mesh']['h'],
                           x0=simulation_results['mesh']['x0'])
    sigma = simulation_results['sigma']
    times = simulation_results['time']
    E = simulation_results['E']
    B = simulation_results['B']
    J = simulation_results['J']
    output = {
        "mesh": mesh,
        "sigma": sigma,
        "times": times,
        "E": E,
        "B": B,
        "J": J,
    }
    return output
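# Illustrative call of the function above; the file name is an assumption,
# not taken from the original code.
# results = load_or_run_results(re_run=False, fname='simulation_results.h5')
# mesh, times = results['mesh'], results['times']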
def write(self, file_name=None, **kwargs):
    """Write out Brain_Data object to Nifti or HDF5 file.

    Args:
        file_name: (str) name of the output file, including path; an .h5
            or .hdf5 extension selects HDF5, anything else writes Nifti
        kwargs: optional arguments to deepdish.io.save
    """
    if ('.h5' in file_name) or ('.hdf5' in file_name):
        x_columns, x_index = _df_meta_to_arr(self.X)
        y_columns, y_index = _df_meta_to_arr(self.Y)
        dd.io.save(file_name, {
            'data': self.data,
            'X': self.X.values,
            'X_columns': x_columns,
            'X_index': x_index,
            'Y': self.Y.values,
            'Y_columns': y_columns,
            'Y_index': y_index,
            'mask_affine': self.mask.affine,
            'mask_data': self.mask.get_data(),
            'mask_file_name': self.mask.get_filename(),
            'file_name': self.file_name
        }, compression=kwargs.get('compression', 'blosc'))
    else:
        self.to_nifti().to_filename(file_name)
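# A minimal sketch of reading the HDF5 file written above back into plain
# arrays with deepdish ('brain_data.h5' is illustrative; rebuilding a full
# Brain_Data object is left to the library's own loader):
import deepdish as dd

contents = dd.io.load('brain_data.h5')
data = contents['data']
mask_affine = contents['mask_affine']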
vz = VzLog(args.output)
vz.title(args.title)
vz.log('alpha =', alpha)

mms = []
for fn in args.model:
    mm = caffe.proto.caffe_pb2.NetParameter()
    with open(fn, 'rb') as f:
        mm.ParseFromString(f.read())
    mms.append(mm)

layers = args.layers

plt.figure()
for fn in args.responses:
    data = dd.io.load(fn)
    name = data['name']
    if layers is None:
        layers = data['layers']
    y = []
    ystd = []
    for l in layers:
        rs = []
        #for X in data['responses'][l]:
        X = data['responses'][l]
        C = np.corrcoef(X.T)
        print(l, X.shape)
        C[np.isnan(C)] = 0.0
        try:
            rs.append(np.where(np.sort(np.linalg.eigvals(C))[::-1].cumsum() / C.shape[0] > alpha)[0][0] / C.shape[0])
        except:
            break
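# The dense one-liner in the try block measures, per layer, the smallest
# fraction of principal components whose cumulative eigenvalue mass exceeds
# `alpha`. A minimal unpacked sketch of the same computation (using eigvalsh
# here, since the correlation matrix is symmetric; the function name is
# hypothetical):
import numpy as np

def effective_dim_fraction(X, alpha=0.99):
    C = np.corrcoef(X.T)                            # unit-to-unit correlations
    C[np.isnan(C)] = 0.0                            # constant units give NaN rows
    eigvals = np.sort(np.linalg.eigvalsh(C))[::-1]  # descending spectrum
    cum = eigvals.cumsum() / C.shape[0]             # trace of C equals its size
    k = np.where(cum > alpha)[0][0]                 # first index past alpha
    return k / C.shape[0]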