Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): snippet fragment — original indentation was lost in this paste and
# the final asyncio.wait(...) call is truncated mid-statement; code kept byte-identical.
async def test_connector_timeout(event_loop):
# Pool of exactly one connection, short connect timeout, retries disabled — so a
# second concurrent request must queue behind (and time out acquiring) the pool.
session = AioSession(loop=event_loop)
config = AioConfig(max_pool_connections=1, connect_timeout=1,
retries={'max_attempts': 0})
# Local stub S3 server; client is pointed at its endpoint with dummy credentials.
async with AIOServer() as server, \
session.create_client('s3', config=config,
endpoint_url=server.endpoint_url,
aws_secret_access_key='xxx',
aws_access_key_id='xxx') as s3_client:
# Issues a GET and then sleeps, deliberately holding the pooled connection open.
async def get_and_wait():
await s3_client.get_object(Bucket='foo', Key='bar')
await asyncio.sleep(100)
# Two tasks compete for the single pooled connection.
task1 = asyncio.Task(get_and_wait(), loop=event_loop)
task2 = asyncio.Task(get_and_wait(), loop=event_loop)
try:
# NOTE(review): truncated here in the source — the asyncio.wait call is unfinished.
done, pending = await asyncio.wait([task1, task2],
# NOTE(review): fragment of an AioConfig connector_args validation test — the
# `with pytest.raises(...)` guard for this first case appears to have been cut off
# by the paste; presumably force_close="1" is a wrong-type case like the ones below.
connector_args = dict(force_close="1")
AioConfig(connector_args)
with pytest.raises(ParamValidationError):
# wrong type
connector_args = dict(ssl_context="1")
AioConfig(connector_args)
with pytest.raises(ParamValidationError):
# invalid key
connector_args = dict(foo="1")
AioConfig(connector_args)
# test merge
cfg = Config(read_timeout=75)
aio_cfg = AioConfig({'keepalive_timeout': 75})
aio_cfg.merge(cfg)
# merge() should produce a new config; the two inputs must be left untouched.
assert cfg.read_timeout == 75
assert aio_cfg.connector_args['keepalive_timeout'] == 75
async def test_connector_timeout2(event_loop):
    """Reading a stalled response body must raise asyncio.TimeoutError."""
    session = AioSession(loop=event_loop)
    # Single pooled connection, 1s connect/read timeouts, retries disabled.
    config = AioConfig(
        max_pool_connections=1,
        connect_timeout=1,
        read_timeout=1,
        retries={'max_attempts': 0},
    )
    async with AIOServer() as server:
        client_ctx = session.create_client(
            's3',
            config=config,
            endpoint_url=server.endpoint_url,
            aws_secret_access_key='xxx',
            aws_access_key_id='xxx',
        )
        async with client_ctx as s3_client:
            with pytest.raises(asyncio.TimeoutError):
                resp = await s3_client.get_object(Bucket='foo', Key='bar')
                await resp["Body"].read()
# NOTE(review): fragment — the enclosing `try:` for the except clauses below is
# outside this view; code kept byte-identical.
# Unpack temporary credentials from the EC2 instance metadata response.
aws_secret_access_key = cred["SecretAccessKey"]
aws_access_key_id = cred["AccessKeyId"]
aws_cred_expiration = cred["Expiration"]
aws_session_token = cred["Token"]
log.info(f"Got Expiration of: {aws_cred_expiration}")
# Assumes the timestamp ends in a literal 'Z' (e.g. 2021-01-01T00:00:00Z);
# strptime's %Z accepts the string "UTC".
expiration_str = aws_cred_expiration[:-1] + "UTC" # trim off 'Z' and add 'UTC'
# save the expiration
app["token_expiration"] = datetime.datetime.strptime(expiration_str, "%Y-%m-%dT%H:%M:%S%Z")
# Best-effort handling: both failure modes are logged and swallowed, leaving any
# previously stored expiration in place.
except json.JSONDecodeError:
msg = "Unexpected error decoding EC2 meta-data response"
log.error(msg)
except KeyError:
msg = "Missing expected key from EC2 meta-data response"
log.error(msg)
# NOTE(review): mid-method fragment (uses self/app from an enclosing scope not
# visible here); code kept byte-identical.
# Build the aiobotocore client with an explicit connection-pool size.
aio_config = AioConfig(max_pool_connections=max_pool_connections)
self._client = session.create_client('s3', region_name=aws_region,
aws_secret_access_key=aws_secret_access_key,
aws_access_key_id=aws_access_key_id,
aws_session_token=aws_session_token,
endpoint_url=s3_gateway,
use_ssl=use_ssl,
config=aio_config)
app['s3'] = self._client # save so same client can be returned in subsequent calls
# NOTE(review): mid-method fragment of a botocore-style client factory; code kept
# byte-identical.
# Copy the emitter so per-client handlers don't leak into the shared session one.
event_emitter = copy.copy(self._event_emitter)
signer = RequestSigner(
service_model.service_id, signing_region,
endpoint_config['signing_name'],
endpoint_config['signature_version'],
credentials, event_emitter
)
config_kwargs['s3'] = s3_config
# Carry connector_args through only when the caller supplied an AioConfig;
# a plain botocore Config has no such attribute.
if isinstance(client_config, AioConfig):
connector_args = client_config.connector_args
else:
connector_args = None
new_config = AioConfig(connector_args, **config_kwargs)
endpoint_creator = AioEndpointCreator(event_emitter, loop=self._loop)
endpoint = endpoint_creator.create_endpoint(
service_model, region_name=endpoint_region_name,
endpoint_url=endpoint_config['endpoint_url'], verify=verify,
response_parser_factory=self._response_parser_factory,
max_pool_connections=new_config.max_pool_connections,
proxies=new_config.proxies,
timeout=(new_config.connect_timeout, new_config.read_timeout),
socket_options=socket_options,
client_cert=new_config.client_cert,
connector_args=new_config.connector_args)
serializer = botocore.serialize.create_serializer(
protocol, parameter_validation)
response_parser = botocore.parsers.create_parser(protocol)
# NOTE(review): fragment — the `def` header owning these parameters is outside this
# view, and the nested `setup` coroutine is cut off at the end; code kept byte-identical.
nconcurrent=24,
region_name=None,
addressing_style='path',
aws_unsigned=None):
# Resolve defaults: auto-detect region, and fall back to an environment check
# for unsigned (anonymous) access when not specified by the caller.
if region_name is None:
region_name = auto_find_region()
opts = {}
if aws_unsigned is None:
aws_unsigned = _aws_unsigned_check_env()
if aws_unsigned:
opts['signature_version'] = botocore.UNSIGNED
# nconcurrent bounds both the connection pool and (presumably) worker concurrency.
s3_cfg = AioConfig(max_pool_connections=nconcurrent,
**opts,
s3=dict(addressing_style=addressing_style))
self._nconcurrent = nconcurrent
self._async = AsyncThread()
self._s3 = None
self._s3_ctx = None
self._session = None
self._closed = False
# Coroutine to lazily construct the client; enters the client context manager
# manually, so a matching __aexit__ is expected elsewhere (not visible here).
async def setup(s3_cfg):
session = aiobotocore.get_session()
s3_ctx = session.create_client('s3',
region_name=region_name,
config=s3_cfg)
s3 = await s3_ctx.__aenter__()
# NOTE(review): fragment — the enclosing `try:` for the except clauses below is
# outside this view; mirrors the earlier token_expiration fragment but stores
# under a lambda-specific key. Code kept byte-identical.
log.info(f"Got Expiration of: {aws_cred_expiration}")
# Assumes the timestamp ends in a literal 'Z'; strptime's %Z accepts "UTC".
expiration_str = aws_cred_expiration[:-1] + "UTC" # trim off 'Z' and add 'UTC'
# save the expiration
app["lambda_token_expiration"] = datetime.datetime.strptime(expiration_str, "%Y-%m-%dT%H:%M:%S%Z")
# Best-effort: both failure modes are logged and swallowed.
except json.JSONDecodeError:
msg = "Unexpected error decoding EC2 meta-data response"
log.error(msg)
except KeyError:
msg = "Missing expected key from EC2 meta-data response"
log.error(msg)
# NOTE(review): mid-function fragment (session/credentials come from an enclosing
# scope not visible here); code kept byte-identical.
# Default the region when the app config does not provide one.
aws_region = config.get("aws_region")
if not aws_region:
aws_region = "us-east-1"
# May be None if unset — AioConfig presumably falls back to its own default then.
max_pool_connections = config.get('aio_max_pool_connections')
aio_config = AioConfig(max_pool_connections=max_pool_connections)
lambda_client = session.create_client('lambda',
region_name=aws_region,
aws_secret_access_key=aws_secret_access_key,
aws_access_key_id=aws_access_key_id,
aws_session_token=aws_session_token,
use_ssl=use_ssl,
config=aio_config)
# Cache on the app so subsequent calls reuse the same client.
app["lambda"] = lambda_client
return lambda_client
# NOTE(review): second copy of the client-factory fragment; the create_endpoint(...)
# call is truncated at the end of this chunk (its closing argument/paren is missing).
# Code kept byte-identical.
# For S3, derive both regions together when neither was supplied.
if signing_region is None and endpoint_region_name is None:
signing_region, endpoint_region_name = \
self._get_default_s3_region(service_name, endpoint_bridge)
config_kwargs['region_name'] = endpoint_region_name
# Copy the emitter so per-client handlers don't leak into the shared session one.
event_emitter = copy.copy(self._event_emitter)
signer = RequestSigner(
service_model.service_id, signing_region,
endpoint_config['signing_name'],
endpoint_config['signature_version'],
credentials, event_emitter
)
config_kwargs['s3'] = s3_config
# connector_args only exist on AioConfig, not on a plain botocore Config.
if isinstance(client_config, AioConfig):
connector_args = client_config.connector_args
else:
connector_args = None
new_config = AioConfig(connector_args, **config_kwargs)
endpoint_creator = AioEndpointCreator(event_emitter, loop=self._loop)
endpoint = endpoint_creator.create_endpoint(
service_model, region_name=endpoint_region_name,
endpoint_url=endpoint_config['endpoint_url'], verify=verify,
response_parser_factory=self._response_parser_factory,
max_pool_connections=new_config.max_pool_connections,
proxies=new_config.proxies,
timeout=(new_config.connect_timeout, new_config.read_timeout),
socket_options=socket_options,
client_cert=new_config.client_cert,
# NOTE(review): fragment — the first line is the tail of a loop that built the
# s3_gateway list from a delimited string (loop header not visible); code kept
# byte-identical.
s3_gateway.append(item.strip())
if isinstance(s3_gateway, list):
# use the node number to select an item from the list
node_number = 0
if "node_number" in app:
node_number = app["node_number"]
# Round-robin by node number so each node gets a stable gateway assignment.
item = s3_gateway[node_number % len(s3_gateway)]
log.debug(f"selecting: {item} from s3_gateway list: {s3_gateway}")
s3_gateway = item
log.info(f"Using S3Gateway: {s3_gateway}")
# Infer SSL purely from the URL scheme of the chosen gateway.
use_ssl = False
if s3_gateway.startswith("https"):
use_ssl = True
# May be None if unset — AioConfig presumably falls back to its own default then.
max_pool_connections = config.get('aio_max_pool_connections')
aio_config = AioConfig(max_pool_connections=max_pool_connections)
s3 = session.create_client('s3', region_name=aws_region,
aws_secret_access_key=aws_secret_access_key,
aws_access_key_id=aws_access_key_id,
aws_session_token=aws_session_token,
endpoint_url=s3_gateway,
use_ssl=use_ssl,
config=aio_config)
app['s3'] = s3 # save so same client can be returned in subsequent calls
return s3
def merge(self, other_config):
    """Return a new AioConfig combining this config with *other_config*.

    Options from *other_config* win on conflict; this instance's
    connector_args are carried over unchanged.  Neither input is mutated.
    (Adapted from the parent Config.merge.)
    """
    merged_options = {
        **self._user_provided_options,
        **other_config._user_provided_options,
    }
    return AioConfig(self.connector_args, **merged_options)