def write(self, bytes):
    self._buffer.put(bytes, block=True)

def writelines(self, sequence):
    for s in sequence:
        self.write(s)
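
# Usage sketch (illustrative; _QueueWriter and the consumer thread are
# assumptions, not part of the original source): a Queue-backed writer like
# the one above acts as a file-like object for producers while a background
# thread drains the buffer.
import threading
from six.moves.queue import Queue

class _QueueWriter(object):
    def __init__(self):
        self._buffer = Queue()

    def write(self, data):
        self._buffer.put(data, block=True)

    def writelines(self, sequence):
        for s in sequence:
            self.write(s)

writer = _QueueWriter()
drain = threading.Thread(target=lambda: writer._buffer.get())  # consumer side
drain.start()
writer.writelines([b'chunk-1'])
drain.join()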
class _Boto3Driver(_Driver):
    """ Boto3 storage adapter (simple, enough for now) """

    _max_multipart_concurrency = config.get('aws.boto3.max_multipart_concurrency', 16)

    _min_pool_connections = 512
    _pool_connections = config.get('aws.boto3.pool_connections', 512)

    _stream_download_pool_connections = 128
    _stream_download_pool = None

    _containers = {}

    scheme = 's3'
    scheme_prefix = str(furl(scheme=scheme, netloc=''))

    _bucket_location_failure_reported = set()

    class _Container(object):
        _creation_lock = threading.Lock()

        def __init__(self, name, cfg):
            try:
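
# Sketch of how pool sizes like _pool_connections usually reach boto3 (an
# assumption; the truncated _Container.__init__ body above is not reproduced
# here). botocore's Config exposes max_pool_connections, which caps the number
# of concurrent HTTP connections per client:
import boto3
from botocore.client import Config

boto_config = Config(max_pool_connections=512)  # mirrors _pool_connections
s3 = boto3.resource('s3', config=boto_config)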
    You can also query existing tasks in the system by calling :func:`Task.get_task`.

    **Usage:** :func:`Task.init` or :func:`Task.get_task`
    """

    TaskTypes = _Task.TaskTypes

    NotSet = object()

    __create_protection = object()
    __main_task = None
    __exit_hook = None
    __forked_proc_main_pid = None
    __task_id_reuse_time_window_in_hours = float(config.get('development.task_reuse_time_window_in_hours', 24.0))
    __store_diff_on_train = config.get('development.store_uncommitted_code_diff_on_train', False)
    __detect_repo_async = config.get('development.vcs_repo_detect_async', False)
    __default_output_uri = config.get('development.default_output_uri', None)

    class _ConnectedParametersType(object):
        argparse = "argument_parser"
        dictionary = "dictionary"
        task_parameters = "task_parameters"

        @classmethod
        def _options(cls):
            # collect the string-valued attributes, skipping dunders such as
            # __module__ (also a string, and would otherwise leak into the set)
            return {
                var for var, val in vars(cls).items()
                if not var.startswith('_') and isinstance(val, six.string_types)
            }
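
    # For reference: with the dunder filter above, _options() yields the
    # attribute names of the supported parameter-connection modes, e.g.
    #     Task._ConnectedParametersType._options()
    #     # -> {'argparse', 'dictionary', 'task_parameters'}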
    def __init__(self, private=None, **kwargs):
        """
        Do not construct Task manually! Use :func:`Task.init` or :func:`Task.get_task` instead.
        """
    def _setup_log(self, default_log_to_backend=None, replace_existing=False):
        """
        Set up logging facilities for this task.

        :param default_log_to_backend: Whether this task should log to the backend. If not specified, the value
            is obtained from the environment, with this argument acting as the default in case that configuration
            is missing. If the resolved value is false, the current logger configuration regarding TaskHandler(s)
            is left untouched.
        :param replace_existing: If True and another task is already logging to the backend, replace its handler
            with a handler for this task.
        """
        # Make sure urllib3 is never in debug/info
        disable_urllib3_info = config.get('log.disable_urllib3_info', True)
        if disable_urllib3_info and logging.getLogger('urllib3').isEnabledFor(logging.INFO):
            logging.getLogger('urllib3').setLevel(logging.WARNING)

        log_to_backend = get_log_to_backend(default=default_log_to_backend) or self._log_to_backend
        if not log_to_backend:
            return

        # Handle the root logger and our own logger. We use a set() so we create no duplicates
        # in case these are the same logger...
        loggers = {logging.getLogger(), LoggerRoot.get_base_logger()}

        # Find all TaskHandler handlers attached to these loggers
        handlers = {logger: h for logger in loggers for h in logger.handlers if isinstance(h, TaskHandler)}

        if handlers and not replace_existing:
            # Handlers already exist and we shouldn't replace them
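
# Self-contained illustration of the handler-scan pattern used above (names
# here are demo assumptions; in the real code the handler type is TaskHandler):
import logging

class DemoHandler(logging.Handler):
    def emit(self, record):
        pass

root = logging.getLogger()
root.addHandler(DemoHandler())
loggers = {root, logging.getLogger('demo')}
found = {lg: h for lg in loggers for h in lg.handlers if isinstance(h, DemoHandler)}
# 'found' maps each logger to its DemoHandler, mirroring the TaskHandler lookup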
from time import time
from threading import Lock
from multiprocessing.pool import ThreadPool

from humanfriendly import format_timespan
from pathlib2 import Path

from ...backend_api.services import events as api_events
from ..base import InterfaceBase
from ...config import config
from ...debugging import get_logger
from ...storage import StorageHelper

from .events import MetricsEventAdapter

upload_pool = ThreadPool(processes=1)
file_upload_pool = ThreadPool(processes=config.get('network.metrics.file_upload_threads', 4))

log = get_logger('metrics')
class Metrics(InterfaceBase):
    """ Metrics manager and batch writer """

    _storage_lock = Lock()
    _file_upload_starvation_warning_sec = config.get('network.metrics.file_upload_starvation_warning_sec', None)
    _file_upload_retries = 3

    @property
    def storage_key_prefix(self):
        return self._storage_key_prefix

    def _get_storage(self, storage_uri=None):
        """ Storage helper used to upload files """
from threading import Thread, Event
from time import time

from ....config import config
from ....backend_interface.task.development.stop_signal import TaskStopSignal
from ....backend_api.services import tasks


class DevWorker(object):
    # plain class attribute: attr.ib() only takes effect on @attr.s-decorated
    # classes, so a simple string is used here
    prefix = "MANUAL:"

    report_period = float(max(config.get('development.worker.report_period_sec', 30.), 1.))
    report_stdout = bool(config.get('development.worker.log_stdout', True))
    ping_period = float(max(config.get('development.worker.ping_period_sec', 30.), 1.))

    def __init__(self):
        self._dev_stop_signal = None
        self._thread = None
        self._exit_event = Event()
        self._task = None
        self._support_ping = False

    def ping(self, timestamp=None):
        try:
            if self._task:
                self._task.send(tasks.PingRequest(self._task.id))
        except Exception:
            return False
        return True
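
# Illustrative daemon loop (an assumption; the thread body is not part of this
# snippet): ping every ping_period seconds until the exit event fires, which is
# how _exit_event and ping() above would typically cooperate.
def _daemon(worker, exit_event, ping_period=30.0):
    while not exit_event.wait(timeout=ping_period):
        if not worker.ping():
            break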
from ....config import config
from ....backend_api.services import tasks


class TaskStopReason(object):
    stopped = "stopped"
    reset = "reset"
    status_changed = "status_changed"


class TaskStopSignal(object):
    enabled = bool(config.get('development.support_stopping', False))

    _number_of_consecutive_reset_tests = 4

    # _unexpected_statuses = (
    #     tasks.TaskStatusEnum.closed,
    #     tasks.TaskStatusEnum.stopped,
    #     tasks.TaskStatusEnum.failed,
    #     tasks.TaskStatusEnum.published,
    #     tasks.TaskStatusEnum.completed,
    # )

    def __init__(self, task):
        from ....backend_interface import Task
        assert isinstance(task, Task)
        self.task = task
        self._task_reset_state_counter = 0
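
# Hedged sketch of a reset test built on this counter (the 'in_progress'
# status string and the helper itself are assumptions; only the counter and
# TaskStopReason come from the snippet): a reset must be observed
# _number_of_consecutive_reset_tests times in a row before it is reported,
# filtering out transient states seen mid-reset.
def _test_reset_counter(signal, status):
    if status == 'in_progress':
        signal._task_reset_state_counter = 0
        return None
    signal._task_reset_state_counter += 1
    if signal._task_reset_state_counter >= TaskStopSignal._number_of_consecutive_reset_tests:
        return TaskStopReason.reset
    return None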
def get_api_event(self):
    return events.MetricsImageEvent(
        url=self._url,
        key=self._key,
        **self._get_base_dict())


class UploadEvent(MetricsEventAdapter):
    """ Image event adapter """

    _format = '.' + str(config.get('metrics.images.format', 'JPEG')).upper().lstrip('.')
    _quality = int(config.get('metrics.images.quality', 87))
    _subsampling = int(config.get('metrics.images.subsampling', 0))

    _metric_counters = {}
    _metric_counters_lock = Lock()
    _image_file_history_size = int(config.get('metrics.file_history_size', 5))

    @staticmethod
    def _replace_slash(part):
        # normalize backslashes, then encode '/' so the part is a flat file name
        return part.replace('\\', '/').strip('/').replace('/', '.slash.')
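
    # Examples of the encoding above:
    #     _replace_slash('train/loss')  -> 'train.slash.loss'
    #     _replace_slash('\\root\\dir') -> 'root.slash.dir'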
    def __init__(self, metric, variant, image_data, local_image_path=None, iter=0, upload_uri=None,
                 image_file_history_size=None, delete_after_upload=False, **kwargs):
        # param override_filename: override the uploaded file name (the extension is added from the local path)
        # param override_filename_ext: override the uploaded file extension
        if image_data is not None and not hasattr(image_data, 'shape'):
            raise ValueError('Image must have a shape attribute')
        self._image_data = image_data
        self._local_image_path = local_image_path
        self._url = None
        self._key = None
        self._count = self._get_metric_count(metric, variant)
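
# Hedged sketch (not the original upload path) of how _format, _quality and
# _subsampling above map onto a Pillow save call for the default JPEG settings:
from io import BytesIO

import numpy as np
from PIL import Image

buf = BytesIO()
img = Image.fromarray(np.zeros((8, 8, 3), dtype=np.uint8))
img.save(buf, format='JPEG', quality=87, subsampling=0)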
from multiprocessing.pool import ThreadPool

from furl import furl
from pathlib2 import Path
from requests.exceptions import ConnectionError
from six import binary_type
from six.moves.queue import Queue, Empty
from six.moves.urllib.parse import urlparse
from six.moves.urllib.request import url2pathname

from ..backend_api.utils import get_http_session_with_retry
from ..backend_config.bucket_config import S3BucketConfigurations, GSBucketConfigurations, AzureContainerConfigurations
from ..config import config
from ..debugging import get_logger
from ..errors import UsageError
log = get_logger('storage')
level = config.get('storage.log.level', None)

if level:
    try:
        log.setLevel(level)
    except (TypeError, ValueError):
        log.error('invalid storage log level in configuration: %s' % level)
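
# For reference: Logger.setLevel accepts level names as well as numbers, so
# both 'DEBUG' and 10 are valid 'storage.log.level' values; an unknown name
# such as 'VERBOSE' raises the ValueError handled above.
demo_log = get_logger('storage-demo')
demo_log.setLevel('DEBUG')
demo_log.setLevel(10)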
upload_pool = ThreadPool(processes=1)
class StorageError(Exception):
    pass


class DownloadError(Exception):
    pass