# Fragment from a GalSim atmosphere test.  rng, ud (a galsim.UniformDeviate),
# altitudes, and weights are defined earlier in the test, as are the numpy,
# sys, pickle, and galsim imports.
r0_500 = 0.2
L0 = [L0]*6  # replicate the scalar outer scale for all six screens
maxSpeed = 20.0
speeds = [ud()*maxSpeed for _ in range(6)]
directions = [ud()*360.0*galsim.degrees for _ in range(6)]
# screen_size = 819.2
screen_size = 32
screen_scale = 0.1
kmax = 1
if sys.version_info >= (3, 4):  # multiprocessing.get_context needs Python 3.4+
    import multiprocessing as mp
    # A context may be given either as an object or as a start-method name.
    ctxs = [None, mp.get_context("fork"), mp.get_context("spawn"), "forkserver"]
else:
    ctxs = [None]  # Only supported ctx on py27
for ctx in ctxs:
    kwargs = dict(r0_500=r0_500, L0=L0, speed=speeds, direction=directions,
                  altitude=altitudes, r0_weights=weights, rng=rng,
                  screen_size=screen_size, screen_scale=screen_scale, mp_context=ctx)
    atm = galsim.Atmosphere(**kwargs)
    xs = np.linspace(-1, 1, 10)
    ys = np.linspace(1, -1, 10)
    ts = np.linspace(0, 2, 10)
    wf = atm.wavefront(xs, ys, ts)
    pkl_file = 'output/atm_pickle_test.pkl'
    with open(pkl_file, 'wb') as fd:
        pickle.dump(atm, fd)  # the original fragment is cut off here; pickling atm is the natural continuation
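# A minimal, self-contained sketch (not part of the test above) showing that
# multiprocessing.get_context accepts a start-method name and returns a context
# object whose API mirrors the top-level multiprocessing module:
import multiprocessing

def _square(x, q):
    q.put(x * x)  # the child returns its result through the context's Queue

if __name__ == "__main__":
    for method in multiprocessing.get_all_start_methods():  # e.g. fork, spawn, forkserver
        ctx = multiprocessing.get_context(method)
        q = ctx.Queue()
        p = ctx.Process(target=_square, args=(7, q))
        p.start()
        assert q.get() == 49
        p.join()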
log_job_message(
    logger=self._logger,
    message="Creating and starting worker process named [{}] to invoke callable [{}] "
            "with args={} and kwargs={}".format(self.worker_process_name, job, job_args, job_kwargs)
)
# Set the exit_handler function if provided, or reset it to None.
# Also save job_args and job_kwargs, which will be passed to the exit_handler.
self._exit_handler = exit_handler
self._job_args = job_args
self._job_kwargs = job_kwargs
# Use the 'fork' start method to create the child process: the child begins as
# a copy-on-write clone of the parent's interpreter state and memory.  A side
# effect is that it inherits the parent's signal handlers, so wrap the job with
# before-advice that resets the signal handlers to the Python defaults inside
# the child process.
ctx = mp.get_context("fork")

def signal_reset_wrapper(func, args, kwargs):
    # Reset signal handling to the defaults in whichever process calls this.
    for sig in self.EXIT_SIGNALS:
        signal.signal(sig, signal.SIG_DFL)
    # Then execute the given function with the given arguments.
    func(*args, **kwargs)

self._worker_process = ctx.Process(
    name=self.worker_process_name,
    target=signal_reset_wrapper,
    args=(job, job_args, job_kwargs),
    daemon=True  # daemon=True ensures that if the parent dispatcher dies, this child worker is killed with it
)
self._worker_process.start()
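# A runnable, standalone sketch of the same idea (the names here are
# illustrative, not from the dispatcher above): a fork()ed child inherits the
# parent's signal handlers, so a wrapper restores the defaults before the job runs.
import multiprocessing
import os
import signal

def job():
    print("worker pid:", os.getpid())

def reset_signals_then_run(func):
    for sig in (signal.SIGINT, signal.SIGTERM):
        signal.signal(sig, signal.SIG_DFL)  # drop handlers inherited via fork
    func()

if __name__ == "__main__":
    signal.signal(signal.SIGTERM, lambda *_: None)  # parent installs a custom handler
    ctx = multiprocessing.get_context("fork")       # "fork" is POSIX-only
    p = ctx.Process(target=reset_signals_then_run, args=(job,), daemon=True)
    p.start()
    p.join()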
def classify(probe, train_data, train_label, train_fake_label, test_data, test_label, test_fake_label):
    # run and ErrorHandler are module-level helpers defined elsewhere in this file.
    mp = multiprocessing.get_context('spawn')
    # Create a thread to listen for errors in the child processes.
    error_queue = mp.SimpleQueue()
    error_handler = ErrorHandler(error_queue)
    processes = []
    for key in train_data:
        x_train = train_data[key]
        y_train = train_label
        x_test = test_data[key]
        y_test = test_label
        process = mp.Process(target=run,
                             args=(probe, key, x_train, y_train, x_test, y_test, error_queue),
                             daemon=False)
        process.start()
        error_handler.add_child(process.pid)
        processes.append(process)
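# A compact, self-contained version of the error-queue pattern used above:
# children push (pid, traceback) tuples into a SimpleQueue that the parent drains.
import multiprocessing
import os
import traceback

def worker(error_queue):
    try:
        raise RuntimeError("boom")
    except Exception:
        error_queue.put((os.getpid(), traceback.format_exc()))

if __name__ == "__main__":
    ctx = multiprocessing.get_context("spawn")
    errors = ctx.SimpleQueue()
    p = ctx.Process(target=worker, args=(errors,))
    p.start()
    p.join()
    while not errors.empty():
        pid, tb = errors.get()
        print("child", pid, "failed:\n", tb)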
def __init__(self, max_workers=None, mp_context=None, initializer=None, initargs=()):
    """Initializes a new ProcessPoolExecutor instance.

    Args:
        max_workers: The maximum number of processes that can be used to
            execute the given calls. If None or not given then as many
            worker processes will be created as the machine has processors.
        mp_context: A multiprocessing context to launch the workers. This
            object should provide SimpleQueue, Queue and Process.
        initializer: A callable used to initialize worker processes.
        initargs: A tuple of arguments to pass to the initializer.
    """
    _check_system_limits()

    if max_workers is None:
        self._max_workers = os.cpu_count() or 1
    else:
        if max_workers <= 0:
            raise ValueError("max_workers must be greater than 0")
        self._max_workers = max_workers

    if mp_context is None:
        mp_context = mp.get_context()
    self._mp_context = mp_context

    if initializer is not None and not callable(initializer):
        raise TypeError("initializer must be a callable")
    self._initializer = initializer
    self._initargs = initargs

    # Management thread
    self._queue_management_thread = None

    # Map of pids to processes
    self._processes = {}

    # Shutdown is a two-step process.
    self._shutdown_thread = False
    self._shutdown_lock = threading.Lock()
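# A usage sketch for the constructor above: pass an explicit spawn context plus
# an initializer that runs once in each worker process (the helper names here
# are made up for illustration).
import multiprocessing
from concurrent.futures import ProcessPoolExecutor

def _init_worker(level):
    import logging
    logging.basicConfig(level=level)  # runs in every worker at startup

def _work(x):
    return x * x

if __name__ == "__main__":
    ctx = multiprocessing.get_context("spawn")
    with ProcessPoolExecutor(max_workers=2, mp_context=ctx,
                             initializer=_init_worker, initargs=(20,)) as pool:
        print(list(pool.map(_work, range(5))))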
self.vy = vy
self.alpha = alpha
if rng is None:
    rng = BaseDeviate()
self._suppress_warning = suppress_warning
self._orig_rng = rng.duplicate()
self.dynamic = True
self.reversible = self.alpha == 1.0
# Use shared memory for the screens.  Allocate it here; fill it in on demand.
# The shared-memory implementation depends on the Python version.
if sys.version_info >= (3, 4):
    # Accept either a ready-made context or a start-method name.
    if not isinstance(mp_context, multiprocessing.context.BaseContext):
        mp_context = multiprocessing.get_context(mp_context)
    self.mp_context = mp_context
    # A unique id for this screen, created in the parent process, that can be used to find the
    # correct shared memory object in child processes.
    self._shareKey = id(self)
    self._objDict = AtmosphericScreen._initObjDict(self.mp_context, self.npix, alpha=self.alpha)
    _GSScreenShare[self._shareKey] = self._objDict
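# The isinstance check above lets callers pass either a context object or a
# start-method name; a minimal standalone version of that normalization:
import multiprocessing
import multiprocessing.context

def normalize_context(mp_context=None):
    # Strings like "fork"/"spawn" (and None, meaning the platform default) are
    # converted; ready-made BaseContext objects pass through unchanged.
    if not isinstance(mp_context, multiprocessing.context.BaseContext):
        mp_context = multiprocessing.get_context(mp_context)
    return mp_context

print(normalize_context("spawn"))  # a SpawnContext instance
print(normalize_context())         # the platform default context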
def divide(dirn, conf=None):
    '''
    Create an instance of a Cell in a subprocess.

    Args:
        dirn (str): Path to the directory backing the Cell.
        conf (dict): Configuration data.

    Returns:
        multiprocessing.Process: The Process object which was created to run the Cell.
    '''
    ctx = multiprocessing.get_context('spawn')
    proc = ctx.Process(target=main, args=(dirn, conf))
    proc.start()
    return proc
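# A hypothetical usage of divide(); the path and conf keys here are invented
# for illustration, and "main" is the module-level entry point the snippet assumes.
if __name__ == "__main__":
    proc = divide("/tmp/cell00", conf={"readonly": True})
    try:
        pass  # interact with the Cell while it runs
    finally:
        proc.terminate()
        proc.join()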
import logging
import multiprocessing
import platform

import player_methods
from surface_tracker import background_tasks, offline_utils
from surface_tracker.cache import Cache
from surface_tracker.surface import Surface, Surface_Location

logger = logging.getLogger(__name__)

# On macOS, "spawn" is set as the default start method in main.py.  That is not
# required here, so we set it back to "fork" to improve performance.
if platform.system() == "Darwin":
    mp_context = multiprocessing.get_context("fork")
else:
    mp_context = multiprocessing.get_context()


class Surface_Offline(Surface):
    """Surface_Offline uses a cache to reuse previously computed surface locations.

    The cache is filled in the background.
    """

    def __init__(self, name="unknown", init_dict=None):
        self.location_cache = None
        super().__init__(name=name, init_dict=init_dict)
        self.cache_seek_idx = mp_context.Value("i", 0)
        self.location_cache_filler = None
        self.observations_frame_idxs = []
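# A small sketch of the ctx.Value("i", 0) pattern used above: a typed shared
# integer that child processes can update for the parent to read.
import multiprocessing

def bump(counter):
    with counter.get_lock():  # Value carries its own lock by default
        counter.value += 1

if __name__ == "__main__":
    ctx = multiprocessing.get_context()  # platform default start method
    counter = ctx.Value("i", 0)
    procs = [ctx.Process(target=bump, args=(counter,)) for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(counter.value)  # 4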
self.helper_threads.append(
    HelperThread(
        MongoPruner, tasks=tasks, run_every=timedelta(minutes=run_every)
    )
)

metrics_config = beer_garden.config.get("metrics")
if metrics_config.prometheus.enabled:
    self.helper_threads.append(
        HelperThread(
            PrometheusServer,
            metrics_config.prometheus.host,
            metrics_config.prometheus.port,
        )
    )

self.context = multiprocessing.get_context("spawn")
self.log_queue = self.context.Queue()
self.log_reader = HelperThread(EntryPointLogger, log_queue=self.log_queue)

for entry_name, entry_value in beer_garden.config.get("entry").items():
    if entry_value.get("enable"):
        self.entry_points.append(EntryPoint.create(entry_name))
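# The context Queue above carries log records from entry-point processes back
# to the parent; a self-contained sketch of that pattern using the stdlib
# QueueHandler/QueueListener helpers (names beyond those are illustrative):
import logging
import logging.handlers
import multiprocessing

def entry_point(log_queue):
    logger = logging.getLogger("entry")
    logger.addHandler(logging.handlers.QueueHandler(log_queue))
    logger.setLevel(logging.INFO)
    logger.info("hello from pid %s", multiprocessing.current_process().pid)

if __name__ == "__main__":
    ctx = multiprocessing.get_context("spawn")
    q = ctx.Queue()
    listener = logging.handlers.QueueListener(q, logging.StreamHandler())
    listener.start()
    p = ctx.Process(target=entry_point, args=(q,))
    p.start()
    p.join()
    listener.stop()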
# len_old_history and old_history come from the saved history file loaded just
# above this excerpt.
if step_instance.history_file != False:
    arr_dim = np.floor((((nchains*niterations)*step_instance.total_var_dimension)/step_instance.history_thin)) + len_old_history
else:
    if niterations < step_instance.history_thin:
        arr_dim = ((np.floor(nchains*niterations/step_instance.history_thin)+nchains)*step_instance.total_var_dimension) + (step_instance.nseedchains*step_instance.total_var_dimension)
    else:
        arr_dim = np.floor(((nchains*niterations/step_instance.history_thin)*step_instance.total_var_dimension)) + (step_instance.nseedchains*step_instance.total_var_dimension)

min_nseedchains = 2*len(step_instance.DEpairs)*nchains
if step_instance.nseedchains < min_nseedchains:
    raise Exception('The size of the seeded starting history is insufficient.  Increase nseedchains to at least %s.' % str(min_nseedchains))

current_position_dim = nchains*step_instance.total_var_dimension

# Get a context with which to define the shared arrays.
if mp_context is None:
    ctx = mp.get_context(mp_context)  # get_context(None) returns the platform default
else:
    ctx = mp_context

history_arr = ctx.Array('d', [0] * int(arr_dim))
if step_instance.history_file != False:
    history_arr[0:len_old_history] = old_history.flatten()
nCR = step_instance.nCR
ngamma = step_instance.ngamma
crossover_setting = step_instance.CR_probabilities
crossover_probabilities = ctx.Array('d', crossover_setting)
ncrossover_updates = ctx.Array('d', [0] * nCR)
delta_m = ctx.Array('d', [0] * nCR)
gamma_level_setting = step_instance.gamma_probabilities
gamma_probabilities = ctx.Array('d', gamma_level_setting)
ngamma_updates = ctx.Array('d', [0] * ngamma)
delta_m_gamma = ctx.Array('d', [0] * ngamma)
current_position_arr = ctx.Array('d', [0] * current_position_dim)
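# A standalone sketch of the ctx.Array('d', ...) pattern above: a shared
# double-precision array that worker processes mutate in place.
import multiprocessing

def fill(shared, start, stop):
    for i in range(start, stop):
        shared[i] = float(i)  # Array('d', ...) is synchronized by default

if __name__ == "__main__":
    ctx = multiprocessing.get_context()
    arr = ctx.Array('d', [0.0] * 8)
    halves = [ctx.Process(target=fill, args=(arr, 0, 4)),
              ctx.Process(target=fill, args=(arr, 4, 8))]
    for p in halves:
        p.start()
    for p in halves:
        p.join()
    print(arr[:])  # [0.0, 1.0, ..., 7.0]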
def __init__(self, make_env, num_processes, envs_per_process, observation_space, action_space, seeds=None):
    """
    :param callable make_env: factory that creates a single environment
    :param int num_processes: number of worker processes to start
    :param int envs_per_process: environments hosted by each worker
    :param gym.spaces.Box observation_space:
    :param gym.spaces.Box or gym.spaces.Discrete action_space:
    :param seeds: optional per-environment seeds
    """
    self.logger = logging.getLogger(__name__)
    self.action_space = action_space
    self.observation_space = observation_space
    # Start the workers with "spawn".  Launching a fresh interpreter per worker
    # is slower, but sharing TensorFlow's memory space with fork()ed children
    # looked like trouble, so spawn is used instead.
    ctx = mp.get_context("spawn")
    num_envs = num_processes * envs_per_process
    self.num_envs = num_envs
    # Full observation buffer: one row per environment.
    obs_shape = (num_envs,) + observation_space.shape
    obs_size = int(np.prod(obs_shape))
    # _NP_TO_CT maps numpy dtypes to ctypes type codes; SectionArea wraps a raw
    # shared buffer in a numpy view (both are defined elsewhere in this module).
    shared_obs = ctx.Array(_NP_TO_CT[observation_space.dtype], obs_size, lock=False)
    shared_next_obs = ctx.Array(_NP_TO_CT[observation_space.dtype], obs_size, lock=False)
    self.obs_area = SectionArea(shared_obs, dtype=observation_space.dtype, shape=obs_shape)
    # Observation after resets have been applied.
    self.next_obs_area = SectionArea(shared_next_obs, dtype=observation_space.dtype, shape=obs_shape)
    action_shape = (num_envs,) + action_space.shape
    action_size = int(np.prod(action_shape))
    shared_action = ctx.Array(_NP_TO_CT[action_space.dtype], action_size, lock=False)
    self.action_area = SectionArea(shared_action, dtype=action_space.dtype, shape=action_shape)
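# A sketch of the lock=False shared-array idea used above: wrap the raw buffer
# in a numpy view so parent and children share the same memory without copies.
# (SectionArea itself is project-specific; np.frombuffer is the stdlib-level
# equivalent shown here.)
import multiprocessing
import numpy as np

def writer(raw, shape):
    view = np.frombuffer(raw, dtype=np.float32).reshape(shape)
    view[0, :] = 1.0  # visible to the parent with no queue traffic

if __name__ == "__main__":
    ctx = multiprocessing.get_context("fork")  # children inherit the same mapping
    shape = (2, 3)
    raw = ctx.Array('f', int(np.prod(shape)), lock=False)
    p = ctx.Process(target=writer, args=(raw, shape))
    p.start()
    p.join()
    print(np.frombuffer(raw, dtype=np.float32).reshape(shape))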