from collections import defaultdict

class keydefaultdict(defaultdict):
    # defaultdict variant whose factory receives the missing key;
    # used by the connection pools below.
    def __missing__(self, key):
        if self.default_factory is None:
            raise KeyError(key)
        ret = self[key] = self.default_factory(key)
        return ret
S3_POOL = None
GC_POOL = None
def reset_connection_pools():
    global S3_POOL
    global GC_POOL
    S3_POOL = keydefaultdict(lambda service: keydefaultdict(
        lambda bucket_name: S3ConnectionPool(service, bucket_name)))
    GC_POOL = keydefaultdict(lambda bucket_name: GCloudBucketPool(bucket_name))

reset_connection_pools()
retry = tenacity.retry(
    reraise=True,
    stop=tenacity.stop_after_attempt(7),
    wait=tenacity.wait_random_exponential(0.5, 60.0),
)
DEFAULT_THREADS = 20
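
# A minimal sketch of applying the shared `retry` decorator defined above;
# `fetch_manifest` and its URL are hypothetical, not part of this module.
import requests

@retry
def fetch_manifest(url):
    # Transient HTTP failures raise here; tenacity re-invokes the call up to
    # 7 times with randomized exponential backoff (0.5s multiplier, 60s cap).
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()
    return resp.json()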
class SimpleStorage(object):
"""
Access files stored in Google Storage (gs), Amazon S3 (s3),
or the local Filesystem (file).
e.g. with Storage('gs://bucket/dataset/layer') as stor:
files = stor.get_file('filename')
Required:
layer_path (str): A protocol prefixed path of the above format.
Accepts s3:// gs:// and file://. File paths are absolute.
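
# A hedged usage sketch of the interface the docstring describes; the local
# path and filename are hypothetical, and put_file is an assumed counterpart
# to the get_file method shown above.
with SimpleStorage('file:///tmp/dataset/layer') as stor:
    stor.put_file('hello.txt', b'hello world')
    content = stor.get_file('hello.txt')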
from six.moves import queue as Queue
import threading
import time
from functools import partial
import boto3
from google.cloud.storage import Client
import tenacity
from .secrets import google_credentials, aws_credentials
from .exceptions import UnsupportedProtocolError
retry = tenacity.retry(
    reraise=True,
    stop=tenacity.stop_after_attempt(7),
    wait=tenacity.wait_random_exponential(0.5, 60.0),
)
class ConnectionPool(object):
"""
This class is intended to be subclassed. See below.
Creating fresh client or connection objects
for Google or Amazon eventually starts causing
breakdowns when too many connections open.
To promote efficient resource use and prevent
containers from dying, we create a ConnectionPool
that allows for the reuse of connections.
Storage interfaces may acquire and release connections
when they need or finish using them.
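
# A minimal sketch of the acquire/release pattern the docstring describes,
# assuming a queue-backed pool; _create_connection is a hypothetical factory
# hook that a concrete subclass would implement.
import queue

class SketchConnectionPool(object):
    def __init__(self, size=8):
        self._q = queue.Queue()
        for _ in range(size):
            self._q.put(self._create_connection())

    def _create_connection(self):
        raise NotImplementedError  # subclass returns a client/connection

    def acquire(self):
        return self._q.get()  # blocks until a pooled connection is free

    def release(self, conn):
        self._q.put(conn)  # hand the connection back for reuse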
from botocore.exceptions import ClientError
from tenacity import retry, retry_if_exception_type, wait_random_exponential

@retry(
    wait=wait_random_exponential(multiplier=1, max=30),
    retry=retry_if_exception_type(ClientError),
)
def _get_cloudformation_exports(target_region):
    cloudformation_client = boto3.client('cloudformation', region_name=target_region)
    paginator = cloudformation_client.get_paginator('list_exports')
    exports_page_iterator = paginator.paginate()
    exports = {
        export['Name']: {
            'Value': export['Value'],
            'ExportingStackId': export['ExportingStackId'],
        } for page in exports_page_iterator for export in page['Exports']
    }
    return exports
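
# Hedged usage sketch: the region and export name below are hypothetical.
exports = _get_cloudformation_exports('us-east-1')
vpc_id = exports['shared-vpc-id']['Value']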
@tenacity.retry(
    wait=tenacity.wait_random_exponential(multiplier=1, max=10),
    stop=tenacity.stop_after_attempt(5)
)
def enable(service, now=True, log=None):
    cmd = ['enable']
    if now:
        cmd.append('--now')
    cmd.append(service)
    try:
        systemctl(cmd, log)
    except SystemctlException as err:
        # Reset failure counters for the service unit, then re-raise so the
        # retry decorator can attempt the enable again.
        reset_failed(service, log)
        raise SystemctlException(str(err))
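
# Hedged usage sketch; the unit name is hypothetical. On failure, the
# handler above resets the unit's failure counter and re-raises, so the
# retry decorator re-attempts the enable.
enable('chronyd.service', now=True)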
import logging

import psycopg2
from psycopg2.pool import ThreadedConnectionPool
from tenacity import (retry, retry_if_exception_type,
                      stop_after_delay, wait_random_exponential)

logger = logging.getLogger(__name__)
connection_pool = None

@retry(wait=wait_random_exponential(multiplier=1, max=60),
       retry=(retry_if_exception_type(psycopg2.OperationalError) |
              retry_if_exception_type(ConnectionError)),
       stop=stop_after_delay(120))
def init_db_pool(db_min_connections, db_max_connections):
    """
    Initializes the database connection pool required by the
    application to connect to the database.
    """
    db = config.DATABASE
    host = config.DATABASE_SERVER
    user = config.DATABASE_USER
    pw = config.DATABASE_PASSWORD
    global connection_pool
    if connection_pool is None:
        logger.info("Initializing the database connection pool. "
                    "db: '%s', user: '%s', host: '%s'.", db, user, host)
        try:
            connection_pool = ThreadedConnectionPool(
                db_min_connections, db_max_connections,
                database=db, user=user, password=pw, host=host)
        except psycopg2.OperationalError:
            # Re-raise so the @retry decorator above can try again.
            logger.exception("Could not initialize the connection pool.")
            raise
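
# Hedged usage sketch for the pool initialized above: borrow a connection
# with getconn(), run a (hypothetical) query, and always hand it back.
init_db_pool(db_min_connections=1, db_max_connections=10)
conn = connection_pool.getconn()
try:
    with conn.cursor() as cur:
        cur.execute('SELECT 1')
finally:
    connection_pool.putconn(conn)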
@tenacity.retry(
    wait=tenacity.wait_random_exponential(multiplier=0.5),
    stop=tenacity.stop_after_attempt(max_attempt_number=10),
    reraise=True,
    after=tenacity.after_log(logger, logging.INFO))
def head_object_with_retry(client, bucket, key):
    return client.head_object(Bucket=bucket, Key=key)
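
# Hedged usage sketch; the bucket and key are hypothetical. head_object
# returns the object's metadata without downloading its body.
s3 = boto3.client('s3')
meta = head_object_with_retry(s3, 'my-bucket', 'path/to/object')
print(meta['ContentLength'], meta['ETag'])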
def sync(cnxt, entity_id, current_traversal, is_update, propagate,
         predecessors, new_data):
    # Retry waits up to 60 seconds at most, with exponentially increasing
    # amounts of jitter per resource still outstanding
    wait_strategy = tenacity.wait_random_exponential(max=60)

    def init_jitter(existing_input_data):
        nconflicts = max(0, len(predecessors) - len(existing_input_data) - 1)
        # 10ms per potential conflict, up to a max of 10s in total
        return min(nconflicts, 1000) * 0.01

    @tenacity.retry(
        retry=tenacity.retry_if_result(lambda r: r is None),
        wait=wait_strategy
    )
    def _sync():
        sync_point = get(cnxt, entity_id, current_traversal, is_update)
        input_data = deserialize_input_data(sync_point.input_data)
        wait_strategy.multiplier = init_jitter(input_data)
        input_data.update(new_data)
        rows_updated = update_input_data(
            cnxt, entity_id, current_traversal, is_update,
            sync_point.atomic_key, serialize_input_data(input_data))
        # Returning None makes retry_if_result schedule another attempt.
        return input_data if rows_updated else None
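
# A self-contained sketch of the retry_if_result pattern used in _sync():
# tenacity keeps re-calling the function for as long as it returns None.
import itertools
import tenacity

_calls = itertools.count()

@tenacity.retry(retry=tenacity.retry_if_result(lambda r: r is None),
                wait=tenacity.wait_fixed(0))
def _eventually_ready():
    return 'ready' if next(_calls) >= 2 else None

assert _eventually_ready() == 'ready'  # succeeds on the third call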
@tenacity.retry(
    wait=tenacity.wait_random_exponential(multiplier=1, max=10),
    stop=tenacity.stop_after_attempt(4)
)
def _pull(self, image):
    cmd = [self.runner.cont_cmd, 'pull', image]
    (stdout, stderr, rc) = self.runner.execute(cmd, self.log)
    if rc != 0:
        raise PullException(stdout, stderr, rc)
    return stdout, stderr
import boto3
import botocore
import tenacity
from hub.log import logger
from hub.utils.store_control import StoreControlClient
retry = tenacity.retry(
    reraise=True,
    stop=tenacity.stop_after_attempt(7),
    wait=tenacity.wait_random_exponential(0.5, 60.0),
)
class Storage(object):
    def __init__(self):
        return

    def get(self, path):
        raise NotImplementedError

    def put(self, path, file):
        raise NotImplementedError
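
# A hedged sketch of a concrete Storage subclass; purely illustrative and
# backed by an in-memory dict rather than a real object store.
class MemoryStorage(Storage):
    def __init__(self):
        self._blobs = {}

    def get(self, path):
        return self._blobs[path]

    def put(self, path, file):
        self._blobs[path] = file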
class S3(Storage):
    def __init__(self, bucket, public=False):
@classmethod
@tenacity.retry(
    wait=tenacity.wait_random_exponential(multiplier=1, max=10),
    stop=tenacity.stop_after_attempt(5)
)
def _detect_target_export(cls, image_url, session):
    if image_url.netloc in cls.export_registries:
        return True
    if image_url.netloc in cls.push_registries:
        return False

    # Detect if the registry is push-capable by requesting an upload URL.
    image, _ = cls._image_tag_from_url(image_url)
    upload_req_url = cls._build_url(
        image_url,
        path=CALL_UPLOAD % {'image': image})
    try:
        RegistrySessionHelper.post(
            session,