def single_run(config_updates, rundir, _id):
    # Run the child experiment once with the given config overrides,
    # archiving its outputs under `rundir` via a FileStorageObserver.
    run = single_exp._create_run(config_updates=config_updates)
    observer = FileStorageObserver.create(basedir=rundir)
    run._id = _id
    run.observers = [observer]
    run()
import os
import sys
from os import path
from os.path import join

from sacred import Experiment
from sacred.observers import FileStorageObserver
from sklearn.externals.joblib import Parallel
from sklearn.externals.joblib import delayed
from sklearn.utils import check_random_state

from cogspaces.pipeline import get_output_dir

print(path.dirname(path.dirname(path.abspath(__file__))))

# Add examples to known models
sys.path.append(
    path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
from exps_old.pipelining.decompose import exp as single_exp

exp = Experiment('multi_decompose')
basedir = join(get_output_dir(), 'multi_decompose')
if not os.path.exists(basedir):
    os.makedirs(basedir)
exp.observers.append(FileStorageObserver.create(basedir=basedir))


@exp.config
def config():
    n_jobs = 7
    seed = 1000


@single_exp.config
def config():
    n_components = 128
    batch_size = 200
    learning_rate = 0.92
    method = 'masked'
    reduction = 12
    alpha = 1e-4
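

# Illustrative sketch: the config_updates handed to single_run are merged by Sacred
# on top of the @single_exp.config defaults above, so only overridden keys change.
# The override values used here are assumed for illustration, not from the original.
run = single_exp._create_run(config_updates={'alpha': 1e-3, 'reduction': 6})
assert run.config['alpha'] == 1e-3        # overridden value
assert run.config['n_components'] == 128  # default declared above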


import pandas as pd

from cogspaces.pipeline import get_output_dir, make_data_frame, split_folds, \
    MultiDatasetTransformer
from joblib import dump
from os.path import join
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression
from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit
from cogspaces.models.trace import TransferTraceNormEstimator

idx = pd.IndexSlice

exp = Experiment('single_exp')
basedir = join(get_output_dir(), 'single_exp')
exp.observers.append(FileStorageObserver.create(basedir=basedir))


@exp.config
def config():
    datasets = ['brainomics', 'hcp']
    reduced_dir = join(get_output_dir(), 'reduced')
    unmask_dir = join(get_output_dir(), 'unmasked')
    # source = 'mix'
    source = 'hcp_new'
    test_size = {'hcp': .1, 'archi': .5, 'brainomics': .5, 'camcan': .5,
                 'la5c': .5, 'full': .5}
    train_size = dict(hcp=None, archi=None, la5c=None, brainomics=None,
                      camcan=None, human_voice=None)
    dataset_weights = {'brainomics': 1, 'archi': 1, 'hcp': 1}
    model = 'trace'


def single_run(config_updates, rundir, _id):
    run = single_exp._create_run(config_updates=config_updates)
    observer = FileStorageObserver.create(basedir=rundir)
    run._id = _id
    run.observers = [observer]
    try:
        run()
    except Exception:
        # Keep the sweep going even if one child run fails.
        print('Run %i failed' % _id)


import os
import sys
from os import path
from os.path import join

from sacred import Experiment
from sacred.observers import FileStorageObserver
from sklearn.externals.joblib import Parallel
from sklearn.externals.joblib import delayed
from sklearn.utils import check_random_state

from cogspaces.pipeline import get_output_dir

# Add examples to known models
sys.path.append(
    path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
from exps_old.old.exp_predict import exp as single_exp

exp = Experiment('predict_multi')
basedir = join(get_output_dir(), 'predict_multi')
if not os.path.exists(basedir):
    os.makedirs(basedir)
exp.observers.append(FileStorageObserver.create(basedir=basedir))


@exp.config
def config():
    n_jobs = 30
    n_seeds = 20
    seed = 2


@single_exp.config
def config():
    datasets = ['archi', 'hcp', 'brainomics']
    reduced_dir = join(get_output_dir(), 'reduced')
    unmask_dir = join(get_output_dir(), 'unmasked')
    source = 'hcp_rs_concat'
    n_subjects = None
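

# Illustrative sketch of how single_run is typically fanned out with joblib; the
# @exp.automain body, the per-run config updates, and the run-directory layout are
# assumed here, not taken from the original file.
@exp.automain
def run(n_jobs, n_seeds, seed, _run):
    # One reproducible seed per child run, derived from the experiment seed.
    seed_list = check_random_state(seed).randint(10000, size=n_seeds)
    rundir = join(basedir, str(_run._id), 'run')
    if not os.path.exists(rundir):
        os.makedirs(rundir)
    Parallel(n_jobs=n_jobs, verbose=10)(
        delayed(single_run)({'seed': int(this_seed)}, rundir, _id)
        for _id, this_seed in enumerate(seed_list))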


# Bundle model, optimizer, and scheduler state into a single checkpoint;
# _restore below reads the same keys back.
state = {'model': self.model.state_dict(),
         'optimizer': self.optimizer.state_dict(),
         'scheduler': self.scheduler.state_dict()}
torch.save(state, checkpoint_path)
return checkpoint_path


def _restore(self, checkpoint_path):
    # Load onto the trainable's device when one is set.
    if hasattr(self, 'device'):
        checkpoint = torch.load(checkpoint_path, self.device)
    else:
        checkpoint = torch.load(checkpoint_path)
    self.model.load_state_dict(checkpoint['model'])
    self.optimizer.load_state_dict(checkpoint['optimizer'])
    self.scheduler.load_state_dict(checkpoint['scheduler'])


from pathlib import Path

from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver

ex = Experiment('Cifar10_experiment')
ex.observers.append(FileStorageObserver.create('logs'))
slack_config_path = Path('../config/slack.json')  # Add webhook_url there for Slack notification
if slack_config_path.exists():
    ex.observers.append(SlackObserver.from_config(str(slack_config_path)))
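
# Illustrative sketch: SlackObserver.from_config reads a JSON file that must contain
# a "webhook_url" entry. Hypothetical one-time creation of that file (placeholder URL):
import json
Path('../config').mkdir(parents=True, exist_ok=True)
Path('../config/slack.json').write_text(
    json.dumps({'webhook_url': 'https://hooks.slack.com/services/...'}))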


@ex.config
def default_config():
    model = 'LeNet'  # Name of model, see model_utils.py
    model_args = {}  # Arguments to be passed to the model, as a dictionary
    optimizer = 'Adam'  # Which optimizer to use, either Adam or SGD
    lr_decay = False  # Whether to use learning rate decay
    lr_decay_period = 25  # Period of learning rate decay
    weight_decay = False  # Whether to use weight decay
    ntrials = 20  # Number of trials for hyperparameter tuning
    nmaxepochs = 100  # Maximum number of epochs
    decay_milestones = [int(30 * nmaxepochs / 100), int(60 * nmaxepochs / 100),
                        int(80 * nmaxepochs / 100)]
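

# Illustrative sketch (not part of the original config) of how these settings could
# be consumed when building the optimizer and LR scheduler; the learning rate, the
# 5e-4 weight-decay value, and gamma=0.1 are assumptions.
import torch


def build_optimizer_and_scheduler(model_params, optimizer='Adam', lr=1e-3,
                                  weight_decay=False, lr_decay=False,
                                  decay_milestones=(30, 60, 80)):
    opt_cls = torch.optim.Adam if optimizer == 'Adam' else torch.optim.SGD
    opt = opt_cls(model_params, lr=lr,
                  weight_decay=5e-4 if weight_decay else 0.0)
    scheduler = None
    if lr_decay:
        # Drop the learning rate at the configured milestones (in epochs).
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            opt, milestones=list(decay_milestones), gamma=0.1)
    return opt, scheduler
# e.g.: opt, sched = build_optimizer_and_scheduler(model.parameters(), lr_decay=True)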


def single_run(config_updates, rundir, _id):
    run = single_exp._create_run(config_updates=config_updates)
    observer = FileStorageObserver.create(basedir=rundir)
    run._id = _id
    run.observers = [observer]
    run()


    self.log_dir = log_dir
else:
    self.log_dir = os.path.join(self.log_dir, self.sacred_ex_name())
if not os.path.exists(self.log_dir):
    os.mkdir(self.log_dir)
self.sacred_db_name()
ex = Experiment(self.sacred_ex_name())
ex.captured_out_filter = apply_backspaces_and_linefeeds
if observer_type == 'mongodb':
    print('Connecting to MongoDB at {}:{}'.format(mongo_url, self.sacred_db_name()))
    ex.observers.append(MongoObserver.create(url=mongo_url, db_name=self.sacred_db_name()))
elif observer_type == 'file':
    ex.observers.append(FileStorageObserver.create(self.log_dir))
else:
    raise ValueError('{} is not a valid type for a SACRED observer.'.format(observer_type))
if hasattr(f_config, '__call__'):
    # init the experiment configuration using a function
    ex.config(f_config)
elif isinstance(f_config, str):
    # init the experiment configuration using a config file
    ex.add_config(f_config)
elif isinstance(f_config, dict):
    # init the experiment configuration using a dictionary
    ex.add_config(f_config)
else:
    raise ValueError('You should provide either a function or a config file for setting up an experiment. '
                     'The given parameter has type {} which is not valid.'.format(type(f_config)))
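

# Illustrative sketch of what each branch above does with f_config, shown directly
# against a standalone Sacred Experiment; the names and values here are assumed.
from sacred import Experiment

demo_ex = Experiment('f_config_demo')


def config_fn():          # a callable: registered as a config scope
    lr = 0.01
    batch_size = 32


demo_ex.config(config_fn)                    # hasattr(f_config, '__call__') branch
demo_ex.add_config({'momentum': 0.9})        # isinstance(f_config, dict) branch
# demo_ex.add_config('config/train.json')    # isinstance(f_config, str) branch (config file path)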