import os

from sacred.observers import MongoObserver

def setup_mongo_observer(ex):
    # Attach MongoDB logging only when the environment provides the settings.
    mongo_url = os.getenv('SACRED_MONGO_URL')
    db_name = os.getenv('SACRED_DB_NAME')
    if mongo_url is not None and db_name is not None:
        ex.observers.append(MongoObserver.create(url=mongo_url, db_name=db_name))
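# Usage sketch (assumed caller, not in the original): export SACRED_MONGO_URL
# and SACRED_DB_NAME before launching, then attach the observer to any experiment.
from sacred import Experiment

demo_ex = Experiment('demo')  # hypothetical experiment name
setup_mongo_observer(demo_ex)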
import sys
from os import path
from os.path import expanduser

# Make the project root importable when this example is run as a script.
sys.path.append(path.dirname(path.dirname(
    path.dirname(path.abspath(__file__)))))

import matplotlib.pyplot as plt
import numpy as np
from sacred import Experiment, Ingredient
from sacred.observers import FileStorageObserver, MongoObserver

from modl.datasets.hcp import fetch_hcp, contrasts_description
from modl.utils.system import get_cache_dirs
from modl.decomposition.fmri import compute_loadings
task_data_ing = Ingredient('task_data')
prediction_ex = Experiment('task_predict_from_nii', ingredients=[task_data_ing])
observer = MongoObserver.create(db_name='amensch', collection='runs')
prediction_ex.observers.append(observer)
observer = FileStorageObserver.create(expanduser('~/output/runs'))
prediction_ex.observers.append(observer)
@prediction_ex.config
def config():
    standardize = True
    C = np.logspace(-1, 2, 15)
    n_jobs = 20
    verbose = 10
    seed = 2
    max_iter = 10000
    tol = 1e-7
    transform_batch_size = 300
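# Hypothetical launch (not part of the original snippet): with both observers
# attached above, each run is mirrored to MongoDB and to ~/output/runs.
if __name__ == '__main__':
    prediction_ex.run(config_updates={'n_jobs': 4})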
from sacred import Experiment
from sacred.observers import MongoObserver
from modl.input_data.fmri.monkey import monkey_patch_nifti_image
monkey_patch_nifti_image()
from sklearn.externals.joblib import Memory
from modl.datasets import fetch_adhd
from modl.datasets import fetch_hcp, get_data_dirs
from modl.input_data.fmri.unmask import create_raw_rest_data, get_raw_rest_data
from modl.utils.system import get_cache_dirs
unmask_rest = Experiment('unmask_rest')
observer = MongoObserver.create(db_name='amensch', collection='runs')
unmask_rest.observers.append(observer)
@unmask_rest.config
def config():
    source = 'adhd'
    smoothing_fwhm = 6
    n_jobs = 3
@unmask_rest.named_config
def hcp():
    source = 'hcp'
    smoothing_fwhm = 4
    n_jobs = 36
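# Illustrative invocation (assumption): a named config overrides the defaults,
# switching the ADHD settings above to HCP; from a shell the equivalent is
# `python unmask_rest.py with hcp`.
if __name__ == '__main__':
    unmask_rest.run(named_configs=['hcp'])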
import numpy as np
import pandas as pd
from modl.classification import make_loadings_extractor
from modl.datasets import get_data_dirs
from modl.input_data.fmri.unmask import get_raw_contrast_data
from modl.utils.system import get_cache_dirs
from sacred import Experiment
from sacred.observers import MongoObserver
from sklearn.externals.joblib import Memory
from sklearn.externals.joblib import dump
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
predict_contrast = Experiment('predict_contrast')
observer = MongoObserver.create(db_name='amensch', collection='runs')
predict_contrast.observers.append(observer)
@predict_contrast.config
def config():
    alphas = np.logspace(-3, 3, 7).tolist()
    standardize = True
    scale_importance = 'sqrt'
    n_jobs = 30
    verbose = 2
    seed = 2
    max_iter = 200
    tol = 1e-7
    alpha = 1e-4
    multi_class = 'multinomial'
    fit_intercept = True
    identity = False
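# Illustrative sketch (assumed, reconstructed from the imports above, not the
# original main body): encode labels, split, and fit a cross-validated
# multinomial logistic regression over the `alphas` grid.
def sketch_fit(X, labels, alphas, max_iter, tol, n_jobs):
    y = LabelEncoder().fit_transform(labels)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    estimator = LogisticRegressionCV(Cs=alphas, multi_class='multinomial',
                                     max_iter=max_iter, tol=tol, n_jobs=n_jobs)
    estimator.fit(X_train, y_train)
    return estimator.score(X_test, y_test)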
import sys
from os import path
from os.path import join

from sacred import Experiment
from sacred.observers import MongoObserver
from sacred.optional import pymongo
from sklearn.externals.joblib import Parallel
from sklearn.externals.joblib import delayed
from sklearn.utils import check_random_state, shuffle

# Make the project packages importable when running this script directly.
sys.path.append(path.dirname(path.dirname(
    path.dirname(path.abspath(__file__)))))

from modl.datasets import get_data_dirs
from examples.predict_contrast import predict_contrast_exp
predict_contrast_multi_exp = Experiment('predict_contrast_train_size',
                                        ingredients=[predict_contrast_exp])
collection = predict_contrast_multi_exp.path
observer = MongoObserver.create(db_name='amensch', collection=collection)
predict_contrast_multi_exp.observers.append(observer)
@predict_contrast_multi_exp.config
def config():
    n_jobs = 24
    n_seeds = 10
    seed = 2
def single_run(config_updates, _id, master_id):
    # Each worker attaches its own observer and overrides the sub-experiment
    # config before creating the child run.
    observer = MongoObserver.create(db_name='amensch', collection=collection)
    predict_contrast_exp.observers = [observer]

    @predict_contrast_exp.config
    def config():
        n_jobs = 1
        from_loadings = True
        projection = False
        factored = True
        loadings_dir = join(get_data_dirs()[0], 'pipeline', 'contrast',
                            'reduced')
        verbose = 2
        max_iter = 50

    run = predict_contrast_exp._create_run(config_updates=config_updates)
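# Assumed driver (not in the original snippet): fan single_run out with joblib,
# one child run per derived seed; the id scheme here is illustrative only.
@predict_contrast_multi_exp.automain
def run(n_jobs, n_seeds, seed, _run):
    seeds = check_random_state(seed).randint(0, 10 ** 6, size=n_seeds)
    Parallel(n_jobs=n_jobs)(
        delayed(single_run)(dict(seed=int(this_seed)), this_id, _run._id)
        for this_id, this_seed in enumerate(seeds))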
# -*- coding: utf-8 -*-
from network import Network
from PIL import Image
from scale import size, load_batches
import numpy as np
import os
import sys
from sacred import Experiment
from sacred.observers import MongoObserver
from sacred.utils import apply_backspaces_and_linefeeds
from gpu_helpers import init_all_gpu
init_all_gpu()
ex = Experiment('Superresolution', ingredients=[])
ex.observers.append(MongoObserver())
ex.captured_out_filter = apply_backspaces_and_linefeeds
@ex.config
def my_config():
    image_size = (320, 240)
    batch_size = 5
    no_epochs = 500
    lr = 0.0001
    lr_stair_width = 10
    lr_decay = 0.95
@ex.capture
def log_training_performance(_run, loss, lr):
    # `_run` and `lr` are injected by @ex.capture; only the loss is logged here.
    _run.log_scalar("loss", float(loss))
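# Minimal usage sketch (assumed main, with a placeholder loss instead of the
# real network training step): captured functions are called without the
# injected arguments, which Sacred fills in from the config and run context.
@ex.automain
def main(no_epochs):
    for epoch in range(no_epochs):
        loss = 1.0 / (epoch + 1)  # placeholder for a real training step
        log_training_performance(loss=loss)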
import sys
from os import path

from sacred import Experiment
from sacred.observers import MongoObserver
from sacred.optional import pymongo
from sklearn.externals.joblib import Parallel
from sklearn.externals.joblib import delayed
from sklearn.utils import check_random_state, shuffle

# Make the examples package importable when running this script directly.
sys.path.append(path.dirname(path.dirname(
    path.dirname(path.abspath(__file__)))))
from examples.predict_contrast import predict_contrast_exp
predict_contrast_multi_exp = Experiment('predict_contrast_multinomial',
                                        ingredients=[predict_contrast_exp])
collection = predict_contrast_multi_exp.path
observer = MongoObserver.create(db_name='amensch', collection=collection)
predict_contrast_multi_exp.observers.append(observer)
@predict_contrast_multi_exp.config
def config():
    n_jobs = 10
    n_seeds = 10
    seed = 2
def single_run(config_updates, _id, master_id):
    # One observer per worker, all writing into the shared collection.
    observer = MongoObserver.create(db_name='amensch', collection=collection)
    predict_contrast_exp.observers = [observer]

    @predict_contrast_exp.config
    def config():
        n_jobs = 1
        epochs = 100
        steps_per_epoch = 300
        dropout_input = 0.25
        dropout_latent = 0.5
        source = 'hcp_rs_concat'
        depth_prob = [0, 1., 0]
        shared_supervised = False
        batch_size = 256
        alpha = 1e-5
        validation = False
        mix_batch = False
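# Illustrative readback (assumption, motivated by the pymongo import above):
# query completed child runs back from the shared collection; database name
# and filter fields follow Sacred's run-document layout.
def fetch_completed_runs(collection_name=collection):
    client = pymongo.MongoClient()
    return list(client['amensch'][collection_name].find({'status': 'COMPLETED'}))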
from nilearn.datasets import load_mni152_brain_mask
from sacred import Experiment
from sacred.observers import MongoObserver
from sklearn.externals.joblib import Memory
from modl.input_data.fmri.monkey import monkey_patch_nifti_image
monkey_patch_nifti_image()
from modl.datasets import get_data_dirs
from modl.datasets.hcp import fetch_hcp, INTERESTING_CONTRASTS_EXTENDED
from modl.input_data.fmri.unmask import create_raw_contrast_data
unmask_task = Experiment('unmask_contrast_hcp')
observer = MongoObserver.create(db_name='amensch', collection='runs')
unmask_task.observers.append(observer)
@unmask_task.config
def config():
    n_jobs = 30
    batch_size = 1200
@unmask_task.automain
def run(n_jobs, batch_size, _run):
    dataset = fetch_hcp()
    imgs = dataset.contrasts
    # Keep every subject and session, but only the extended contrast list.
    interesting_con = INTERESTING_CONTRASTS_EXTENDED
    imgs = imgs.loc[(slice(None), slice(None), interesting_con), :]
    mask = dataset.mask  # reuse the dataset fetched above instead of refetching
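# Side sketch (illustration only): the .loc selection above relies on pandas
# MultiIndex slicing; with a (subject, session, contrast) index, slice(None)
# keeps a whole level while a list filters it.
import pandas as pd
toy_index = pd.MultiIndex.from_product(
    [[1, 2], ['LR'], ['FACES', 'SHAPES']],
    names=['subject', 'session', 'contrast'])
toy = pd.DataFrame({'path': ['a', 'b', 'c', 'd']}, index=toy_index)
print(toy.loc[(slice(None), slice(None), ['FACES']), :])  # FACES rows only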