if isinstance(arg, tuple):
# tuples are (bundle_uuid, genpath) which have not been fleshed out
return arg + (func,)
try:
if func is None:
return arg
# String encoding of a chain of functions, split on FUNC_DELIM (e.g. 'size', 's/a/b')
for f in func.split(FUNC_DELIM):
if f == 'str':
arg = str(arg)
elif f == 'date':
arg = formatting.date_str(float(arg)) if arg is not None else None
elif f == 'duration':
arg = formatting.duration_str(float(arg)) if arg is not None else None
elif f == 'size':
arg = formatting.size_str(float(arg)) if arg is not None else None
elif f.startswith('%'):
arg = (f % float(arg)) if arg is not None else None
elif f.startswith('s/'):  # regular expression substitution: s/<old>/<new>
esc_slash = '_ESC_SLASH_' # Assume this doesn't occur in s
# Preserve escaped characters: \/
tokens = f.replace('\\/', esc_slash).split('/')
if len(tokens) != 3:
return '<invalid regex: %s>' % f
s = tokens[1].replace(esc_slash, '/')
t = tokens[2].replace(esc_slash, '/')
arg = re.sub(s, t, arg)
elif f.startswith('['):  # substring [start:end]
    m = re.match(r'\[(.*):(.*)\]', f)
    if m:
        start = int(m.group(1) or 0)
        end = int(m.group(2) or len(arg))
        arg = arg[start:end]
    else:
        return '<invalid function: %s>' % f
return arg
except (ValueError, TypeError):
    # Assumed handler: the snippet is cropped before the original except clause.
    return '<invalid: %s>' % arg
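The escaped-slash handling in the s/.../... branch is the subtle part; below is a minimal standalone sketch of the same protect-split-restore idea (sub_spec is a hypothetical helper, not part of the original code):

import re

def sub_spec(spec, value):
    # Protect escaped slashes so they survive the split on '/',
    # then restore them in the pattern and the replacement.
    esc_slash = '_ESC_SLASH_'
    tokens = spec.replace('\\/', esc_slash).split('/')
    if len(tokens) != 3:
        return '<invalid regex: %s>' % spec
    old = tokens[1].replace(esc_slash, '/')
    new = tokens[2].replace(esc_slash, '/')
    return re.sub(old, new, value)

print(sub_spec(r's/v(\d+)/version \1', 'v42'))  # -> 'version 42'
print(sub_spec(r's/a\/b/c', 'a/b'))             # -> 'c'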
run_state = run_state._replace(
    container_time_user=run_stats.get(
        'container_time_user', run_state.container_time_user
    ),
    container_time_system=run_stats.get(
        'container_time_system', run_state.container_time_system
    ),
)
if run_state.resources.time and container_time_total > run_state.resources.time:
kill_messages.append(
'Time limit exceeded. (Container uptime %s > time limit %s)'
% (duration_str(container_time_total), duration_str(run_state.resources.time))
)
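# Exit code 137 = 128 + SIGKILL (9); Docker reports it when the container is killed, typically by the OOM killer.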
if run_state.max_memory > run_state.resources.memory or run_state.exitcode == '137':
kill_messages.append(
'Memory limit %s exceeded.' % size_str(run_state.resources.memory)
)
if run_state.resources.disk and run_state.disk_utilization > run_state.resources.disk:
kill_messages.append(
'Disk limit %sb exceeded.' % size_str(run_state.resources.disk)
)
if kill_messages:
run_state = run_state._replace(kill_message=' '.join(kill_messages), is_killed=True)
return run_state
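run_state appears to be an immutable NamedTuple here, so _replace returns a new value that must be rebound and returned. A toy illustration with hypothetical fields:

from typing import NamedTuple, Optional

class MiniRunState(NamedTuple):
    # Hypothetical stand-in for the worker's run state.
    is_killed: bool = False
    kill_message: Optional[str] = None

def kill(run_state, *messages):
    # _replace does not mutate; callers must use the returned tuple.
    return run_state._replace(kill_message=' '.join(messages), is_killed=True)

state = kill(MiniRunState(), 'Time limit exceeded.', 'Memory limit 4g exceeded.')
print(state.is_killed, state.kill_message)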
self._prune_failed_dependencies()
# With all locks held (fast if no cleanup is needed; otherwise, make sure nothing is corrupted)
while True:
with self._global_lock:
self._acquire_all_locks()
bytes_used = sum(dep_state.size_bytes for dep_state in self._dependencies.values())
serialized_length = len(codalab.worker.pyjson.dumps(self._dependencies))
if (
bytes_used > self._max_cache_size_bytes
or serialized_length > LocalFileSystemDependencyManager.MAX_SERIALIZED_LEN
):
logger.debug(
'%d dependencies in cache, disk usage: %s (max %s), serialized size: %s (max %s)',
len(self._dependencies),
size_str(bytes_used),
size_str(self._max_cache_size_bytes),
size_str(serialized_length),
LocalFileSystemDependencyManager.MAX_SERIALIZED_LEN,
)
ready_deps = {
dep_key: dep_state
for dep_key, dep_state in self._dependencies.items()
if dep_state.stage == DependencyStage.READY and not dep_state.dependents
}
failed_deps = {
dep_key: dep_state
for dep_key, dep_state in self._dependencies.items()
if dep_state.stage == DependencyStage.FAILED
}
if failed_deps:
dep_key_to_remove = min(
    failed_deps.items(), key=lambda dep: dep[1].last_used
)[0]
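Eviction is least-recently-used first: min over last_used picks the entry that has been idle the longest, and [0] keeps only its key. A toy illustration (the DepState fields are hypothetical):

import collections

DepState = collections.namedtuple('DepState', ['stage', 'last_used', 'size_bytes', 'dependents'])
failed = {
    'dep-a': DepState('FAILED', last_used=200, size_bytes=10, dependents=set()),
    'dep-b': DepState('FAILED', last_used=50, size_bytes=20, dependents=set()),
}
assert min(failed.items(), key=lambda dep: dep[1].last_used)[0] == 'dep-b'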
def update_state_and_check_killed(bytes_downloaded):
"""
Callback for the bundle service client: updates the dependency state and
raises DownloadAbortedException if the download is killed by the dependency manager.
"""
with self._dependency_locks[dependency_state.dependency_key]:
state = self._dependencies[dependency_state.dependency_key]
if state.killed:
raise DownloadAbortedException("Aborted by user")
self._dependencies[dependency_state.dependency_key] = state._replace(
size_bytes=bytes_downloaded,
message="Downloading dependency: %s downloaded"
% size_str(bytes_downloaded),
)
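A callback like this is presumably driven from the download loop with a running byte total; below is a hypothetical driver (download_stream and its signature are illustrative, not the real client API):

def download_stream(fileobj, dest_path, progress_callback, chunk_size=1024 * 1024):
    # Copy the stream in chunks, reporting cumulative bytes after each one
    # so the callback can update state or abort the whole download.
    bytes_downloaded = 0
    with open(dest_path, 'wb') as out:
        while True:
            chunk = fileobj.read(chunk_size)
            if not chunk:
                break
            out.write(chunk)
            bytes_downloaded += len(chunk)
            progress_callback(bytes_downloaded)  # may raise DownloadAbortedException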
def get_top_level_contents(self, target):
    """Return target info (depth 1) with a human-readable size_str added to each child item."""
    info = self.get_target_info(target, 1)
if info is not None and info['type'] == 'directory':
for item in info['contents']:
item['size_str'] = formatting.size_str(item['size'])
return info
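For a directory target the annotated result would look roughly like this (names and sizes are illustrative):

# {
#     'type': 'directory',
#     'contents': [
#         {'name': 'stdout', 'type': 'file', 'size': 2048, 'size_str': '2k'},
#         {'name': 'output', 'type': 'directory', 'size': 4096, 'size_str': '4k'},
#     ],
# }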
self.container_time_user = run_stats.get(
    'container_time_user', self.container_time_user
)
self.container_time_system = run_stats.get(
    'container_time_system', self.container_time_system
)
if self.resources.time and container_time_total > self.resources.time:
kill_messages.append(
'Time limit exceeded. (Container uptime %s > time limit %s)'
% (duration_str(container_time_total), duration_str(self.resources.time))
)
if self.max_memory > self.resources.memory or self.exitcode == '137':
kill_messages.append('Memory limit %s exceeded.' % size_str(self.resources.memory))
if self.resources.disk and self.disk_utilization > self.resources.disk:
kill_messages.append('Disk limit %sb exceeded.' % size_str(self.resources.disk))
if kill_messages:
self.kill_message = ' '.join(kill_messages)
self.is_killed = True
except docker_utils.DockerException:
logger.error(traceback.format_exc())
"""
self._prune_failed_dependencies()
# With all the locks (should be fast if no cleanup needed, otherwise make sure nothing is corrupted
while True:
with self._global_lock:
self._acquire_all_locks()
bytes_used = sum(dep_state.size_bytes for dep_state in self._dependencies.values())
serialized_length = len(codalab.worker.pyjson.dumps(self._dependencies))
if (
bytes_used > self._max_cache_size_bytes
or serialized_length > LocalFileSystemDependencyManager.MAX_SERIALIZED_LEN
):
logger.debug(
'%d dependencies in cache, disk usage: %s (max %s), serialized size: %s (max %s)',
len(self._dependencies),
size_str(bytes_used),
size_str(self._max_cache_size_bytes),
size_str(serialized_length),
LocalFileSystemDependencyManager.MAX_SERIALIZED_LEN,
)
ready_deps = {
dep_key: dep_state
for dep_key, dep_state in self._dependencies.items()
if dep_state.stage == DependencyStage.READY and not dep_state.dependents
}
failed_deps = {
dep_key: dep_state
for dep_key, dep_state in self._dependencies.items()
if dep_state.stage == DependencyStage.FAILED
}
if failed_deps:
dep_key_to_remove = min(
# With all the locks (should be fast if no cleanup needed, otherwise make sure nothing is corrupted
while True:
with self._global_lock:
self._acquire_all_locks()
bytes_used = sum(dep_state.size_bytes for dep_state in self._dependencies.values())
serialized_length = len(codalab.worker.pyjson.dumps(self._dependencies))
if (
bytes_used > self._max_cache_size_bytes
or serialized_length > LocalFileSystemDependencyManager.MAX_SERIALIZED_LEN
):
logger.debug(
'%d dependencies in cache, disk usage: %s (max %s), serialized size: %s (max %s)',
len(self._dependencies),
size_str(bytes_used),
size_str(self._max_cache_size_bytes),
size_str(serialized_length),
LocalFileSystemDependencyManager.MAX_SERIALIZED_LEN,
)
ready_deps = {
dep_key: dep_state
for dep_key, dep_state in self._dependencies.items()
if dep_state.stage == DependencyStage.READY and not dep_state.dependents
}
failed_deps = {
dep_key: dep_state
for dep_key, dep_state in self._dependencies.items()
if dep_state.stage == DependencyStage.FAILED
}
if failed_deps:
dep_key_to_remove = min(
failed_deps.items(), key=lambda dep: dep[1].last_used
)[0]
def size(x):
    """Format a target-info entry: human-readable size for files, 'dir' for directories."""
    t = x.get('type', '???')
if t == 'file':
return formatting.size_str(x['size'])
elif t == 'directory':
return 'dir'
else:
return t
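Usage sketch, assuming formatting is codalab.lib.formatting (inputs are illustrative):

print(size({'type': 'file', 'size': 2048}))  # human-readable size via formatting.size_str
print(size({'type': 'directory'}))           # -> 'dir'
print(size({}))                              # -> '???'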