def init_apis(*args, **kwargs):
    global cfg
    cfg = pkconfig.init(
        expiry_days=(None, _cfg_login_days, 'when auth login expires'),
    )
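
# Hedged sketch of how the declaration above is fed at runtime: pkconfig
# derives the environment variable name from the module path (the name below
# assumes this lives in sirepo.auth) and pipes the raw string through the
# declared parser:
#
#   SIREPO_AUTH_EXPIRY_DAYS=30  =>  cfg.expiry_days == _cfg_login_days('30')
#
# Left unset, cfg.expiry_days keeps the declared default (None).
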
# truncated snippet: this runs inside a try: block where `stdout` and
# `result` were captured by the same worker-thread pattern shown here
    stderr = await trio.run_sync_in_worker_thread(
        functools.partial(
            container.logs,
            stdout=False,
            stderr=True,
        )
    )
    return pkcollections.Dict(
        returncode=result['StatusCode'],
        stdout=stdout,
        stderr=stderr,
    )
finally:
    await trio.run_sync_in_worker_thread(container.remove)
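
# Standalone sketch of the worker-thread pattern above, runnable on its own.
# Blocking client calls (like docker-py's container.logs) must not run on
# trio's event loop; run_sync_in_worker_thread (trio < 0.12, later renamed
# trio.to_thread.run_sync) forwards only positional arguments, hence
# functools.partial to bind the keywords.
import functools
import trio

def _blocking(msg, repeat=1):
    # stands in for any blocking client call
    return msg * repeat

async def _main():
    out = await trio.run_sync_in_worker_thread(
        functools.partial(_blocking, 'x', repeat=3),
    )
    assert out == 'xxx'

trio.run(_main)
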
cfg = pkconfig.init(
    mount_dev_env_into_container=(
        pkconfig.channel_in('dev'),
        bool,
        'mount host ~/.pyenv and ~/src into container',
    ),
)
err = None
try:
    if not celery_tasks.celery.control.ping():
        err = 'You need to start Celery:\nsirepo service celery'
except Exception:
    err = 'You need to start Rabbit:\nsirepo service rabbitmq'
    # Rabbit doesn't have a long timeout, but celery ping does
    time.sleep(.5)
if not err:
    return
#TODO(robnagler) really should be pkconfig.Error() or something else
# but this prints a nice message. Don't call sys.exit, not nice
pkcli.command_error(err)
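
# Hypothetical pkcli module showing the same fail-fast style: pykern's
# pkcli.command_error() raises an error that the pkcli dispatcher prints as
# a clean one-line message and converts to a non-zero exit status, which is
# why the TODO above prefers it to sys.exit().
from pykern import pkcli

def check(service='celery'):
    if service not in ('celery', 'rabbitmq'):
        pkcli.command_error('unknown service={}', service)
    return 'ok'
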
cfg = pkconfig.init(
    docker_image=('radiasoft/sirepo', str, 'docker image to run all jobs'),
    import_secs=(10, int, 'maximum runtime of backgroundImport'),
    # default is set in init(), because of server.cfg.job_queue
    job_class=(None, cfg_job_class, 'how to run jobs: Celery or Background'),
    parallel_secs=(3600, int, 'maximum runtime of parallel job'),
    sequential_secs=(300, int, 'maximum runtime of serial job'),
)
abort = '''
from mpi4py import MPI
if MPI.COMM_WORLD.Get_rank():
    import signal
    signal.signal(signal.SIGTERM, lambda x, y: MPI.COMM_WORLD.Abort(1))
'''
n = re.sub(r'^from __future.*', abort, script, count=1, flags=re.MULTILINE)
script = abort + script if n == script else n
fn = 'mpi_run.py'
pkio.write_text(fn, script)
return run_program([sys.executable or 'python', fn])
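
# Hedged usage sketch: the enclosing function (run_script in sirepo.mpi)
# takes generated simulation code as text. The spliced-in preamble makes
# every rank except 0 call MPI.Abort on SIGTERM, so terminating one process
# tears down the whole MPI job instead of hanging the communicator. The
# script text below is illustrative only.
run_script('print("hello from the simulation")\n')
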
cfg = pkconfig.init(
    cores=(1, int, 'cores to use per run'),
    slaves=(1, int, 'DEPRECATED: set $SIREPO_MPI_CORES'),
)
cfg.cores = max(cfg.cores, cfg.slaves)
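
# Effect of the max() shim (values illustrative): a legacy deployment that
# still sets $SIREPO_MPI_SLAVES=4 while leaving cores at its default of 1
# ends up with cfg.cores == 4, so old configs keep working while the
# DEPRECATED description points users at $SIREPO_MPI_CORES.
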
def init():
    global cfg, _CLASSES, _DEFAULT_CLASS
    assert not cfg
    cfg = pkconfig.init(
        modules=((_DEFAULT_MODULE,), set, 'available job driver modules'),
    )
    _CLASSES = PKDict()
    p = pkinspect.this_module().__name__
    for n in cfg.modules:
        m = importlib.import_module(pkinspect.module_name_join((p, n)))
        _CLASSES[n] = m.init_class()
    _DEFAULT_CLASS = _CLASSES.get('docker') or _CLASSES.get(_DEFAULT_MODULE)
    pkdlog('initialized with drivers {}', _CLASSES.keys())
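
# Hypothetical sketch of the contract the loop above relies on: each name in
# cfg.modules is a sibling module exposing init_class(). The module and
# class names here (local, LocalDriver) are illustrative, not from the
# source.

# sirepo/job_driver/local.py
class LocalDriver:
    """Driver that would run jobs as local subprocesses."""

def init_class():
    return LocalDriver
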
@pkconfig.parse_none
def _cfg_http_name(value):
    assert re.search(r'^\w{1,32}$', value), \
        'must be 1-32 word characters; http_name={}'.format(value)
    return value
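
# Hedged note: @pkconfig.parse_none signals that this parser takes
# responsibility for None values instead of pkconfig short-circuiting around
# it. The validator itself can be exercised directly:
assert _cfg_http_name('sirepo_dev') == 'sirepo_dev'
# _cfg_http_name('bad name!') raises AssertionError: must be 1-32 word
# characters; http_name=bad name!
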
def _state():
    return flask.g.sirepo_cookie
cfg = pkconfig.init(
    http_name=('sirepo_' + pkconfig.cfg.channel, _cfg_http_name, 'Set-Cookie name'),
    private_key=(None, str, 'urlsafe base64 encrypted 32-byte key'),
    is_secure=(
        not pkconfig.channel_in('dev'),
        pkconfig.parse_bool,
        'Add secure attribute to Set-Cookie',
    ),
)
def _init(app):
    global _db
    app.session_interface = _FlaskSessionInterface()
    app.config.update(
        SQLALCHEMY_DATABASE_URI='sqlite:///{}'.format(_db_filename(app)),
        SQLALCHEMY_COMMIT_ON_TEARDOWN=True,
        SQLALCHEMY_TRACK_MODIFICATIONS=False,
    )
    _db = SQLAlchemy(app, session_options=dict(autoflush=True))
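
# Note (hedged): SQLALCHEMY_COMMIT_ON_TEARDOWN was deprecated by
# Flask-SQLAlchemy and removed in later releases; newer code commits
# explicitly with db.session.commit() instead. The setting matches the
# version this snippet was written against.
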
global cfg
cfg = pkconfig.init(
    github_key=(None, str, 'GitHub application key'),
    github_secret=(None, str, 'GitHub application secret'),
    github_callback_uri=(None, str, 'GitHub application callback URI'),
)
if not cfg.github_key or not cfg.github_secret:
    raise RuntimeError('Missing GitHub oauth config')
def init():
    global _DB_DIR, cfg, _NEXT_REQUEST_SECONDS
    if _DB_DIR:
        return
    job.init()
    job_driver.init()
    _DB_DIR = sirepo.srdb.root().join(_DB_SUBDIR)
    pykern.pkio.mkdir_parent(_DB_DIR)
    cfg = pkconfig.init(
        parallel=dict(
            max_hours=(1, float, 'maximum run-time for parallel job (except sbatch)'),
        ),
        sbatch_poll_secs=(60, int, 'how often to poll squeue and parallel status'),
        sequential=dict(
            max_hours=(.1, float, 'maximum run-time for sequential job'),
        ),
    )
    for k in job.KINDS:
        _MAX_RUN_SECS[k] = int(cfg[k].max_hours * 3600)
    _NEXT_REQUEST_SECONDS = PKDict({
        job.PARALLEL: 2,
        job.SBATCH: cfg.sbatch_poll_secs,
        job.SEQUENTIAL: 1,
    })
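
# Arithmetic implied by the defaults above: parallel jobs are capped at
# int(1 * 3600) == 3600 seconds, sequential jobs at int(.1 * 3600) == 360,
# and agents are told to report back in 2s (parallel), 1s (sequential), or
# cfg.sbatch_poll_secs == 60s for sbatch.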