Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
buckets = s3_conn.get_all_buckets()
except S3ResponseError as e:
response['error'] = "S3ResponseError getting buckets: %s" % e
except self.http_exceptions as ex:
response['error'] = "Exception getting buckets: %s" % ex
if response['error']:
bioblend.log.exception(response['error'])
return response
for bucket in [b for b in buckets if b.name.startswith('cm-')]:
try:
# TODO: first lookup if persistent_data.yaml key exists
pd = bucket.get_key('persistent_data.yaml')
except S3ResponseError:
# This can fail for a number of reasons for non-us and/or
# CNAME'd buckets but it is not a terminal error
bioblend.log.warning("Problem fetching persistent_data.yaml from bucket %s", bucket)
continue
if pd:
# We are dealing with a CloudMan bucket
pd_contents = pd.get_contents_as_string()
pd = yaml.load(pd_contents)
if 'cluster_name' in pd:
cluster_name = pd['cluster_name']
else:
for key in bucket.list():
if key.name.endswith('.clusterName'):
cluster_name = key.name.split('.clusterName')[0]
cluster = {'cluster_name': cluster_name,
'persistent_data': pd,
'bucket_name': bucket.name}
# Look for cluster's placement too
if include_placement:
if cmsg:
progress['name'] = cmsg.name
progress['sg_id'] = cmsg.id
# Add appropriate authorization rules
# If these rules already exist, nothing will be changed in the SG
for port in ports:
try:
if not self.rule_exists(
cmsg.rules, from_port=port[0], to_port=port[1]):
cmsg.authorize(
ip_protocol='tcp',
from_port=port[0],
to_port=port[1],
cidr_ip='0.0.0.0/0')
else:
bioblend.log.debug("Rule (%s:%s) already exists in the SG", port[0], port[1])
except EC2ResponseError as e:
err_msg = "A problem adding security group authorizations: {0} " \
"(code {1}; status {2})" \
.format(str(e), e.error_code, e.status)
bioblend.log.exception(err_msg)
progress['error'] = err_msg
# Add ICMP (i.e., ping) rule required by HTCondor
try:
if not self.rule_exists(
cmsg.rules, from_port='-1', to_port='-1', ip_protocol='icmp'):
cmsg.authorize(
ip_protocol='icmp',
from_port=-1,
to_port=-1,
cidr_ip='0.0.0.0/0')
else:
"""
assert vm_ready_timeout > 0
assert vm_ready_timeout > vm_ready_check_interval
assert vm_ready_check_interval > 0
if self.host_name: # Host name available. Therefore, instance is ready
return
for time_left in range(vm_ready_timeout, 0, -vm_ready_check_interval):
status = self.get_machine_status()
if status['public_ip'] != '' and status['error'] == '':
self._init_instance(status['public_ip'])
return
elif status['error'] != '':
msg = "Error launching an instance: {0}".format(status['error'])
bioblend.log.error(msg)
raise VMLaunchException(msg)
else:
bioblend.log.warning("Instance not ready yet (it's in state '{0}'); waiting another {1} seconds..."
.format(status['instance_state'], time_left))
time.sleep(vm_ready_check_interval)
raise VMLaunchException("Waited too long for instance to become ready. Instance Id: %s"
% self.instance_id)
else:
if r.status_code == 200:
if not json:
return r
elif not r.content:
msg = "GET: empty response"
else:
try:
return r.json()
except ValueError:
msg = "GET: invalid JSON : %r" % (r.content,)
else:
msg = "GET: error %s: %r" % (r.status_code, r.content)
msg = "%s, %d attempts left" % (msg, attempts_left)
if attempts_left <= 0:
bioblend.log.error(msg)
raise ConnectionError(msg, body=r.text,
status_code=r.status_code)
else:
bioblend.log.warning(msg)
time.sleep(retry_delay)
def _get_volume_placement(self, vol_id):
"""
Returns the placement of a volume (or None, if it cannot be determined)
"""
try:
vol = self.ec2_conn.get_all_volumes(volume_ids=[vol_id])
except EC2ResponseError as ec2e:
bioblend.log.error("EC2ResponseError querying for volume {0}: {1}"
.format(vol_id, ec2e))
vol = None
if vol:
return vol[0].zone
else:
bioblend.log.error("Requested placement of a volume '%s' that does not exist.", vol_id)
return None
if g_rule_exists:
break
if not g_rule_exists:
try:
cmsg.authorize(
src_group=cmsg,
ip_protocol='tcp',
from_port=0,
to_port=65535)
except EC2ResponseError as e:
err_msg = "A problem with security group group " \
"authorization: {0} (code {1}; status {2})" \
.format(str(e), e.error_code, e.status)
bioblend.log.exception(err_msg)
progress['err_msg'] = err_msg
bioblend.log.info("Done configuring '%s' security group", cmsg.name)
else:
bioblend.log.warning(
"Did not create security group '{0}'".format(sg_name))
return progress
.. versionadded:: 0.3
.. versionchanged:: 0.7.0
The return value changed from a list to a dictionary.
"""
clusters = []
response = {'clusters': clusters, 'error': None}
s3_conn = self.connect_s3(self.access_key, self.secret_key, self.cloud)
try:
buckets = s3_conn.get_all_buckets()
except S3ResponseError as e:
response['error'] = "S3ResponseError getting buckets: %s" % e
except self.http_exceptions as ex:
response['error'] = "Exception getting buckets: %s" % ex
if response['error']:
bioblend.log.exception(response['error'])
return response
for bucket in [b for b in buckets if b.name.startswith('cm-')]:
try:
# TODO: first lookup if persistent_data.yaml key exists
pd = bucket.get_key('persistent_data.yaml')
except S3ResponseError:
# This can fail for a number of reasons for non-us and/or
# CNAME'd buckets but it is not a terminal error
bioblend.log.warning("Problem fetching persistent_data.yaml from bucket %s", bucket)
continue
if pd:
# We are dealing with a CloudMan bucket
pd_contents = pd.get_contents_as_string()
pd = yaml.load(pd_contents)
if 'cluster_name' in pd:
cluster_name = pd['cluster_name']
'sg_ids': [],
'kp_name': '',
'kp_material': '',
'rs': None,
'instance_id': '',
'error': None}
# First satisfy the prerequisites
for sg in security_groups:
# Get VPC ID in case we're launching into a VPC
vpc_id = None
if subnet_id:
try:
sn = self.vpc_conn.get_all_subnets(subnet_id)[0]
vpc_id = sn.vpc_id
except (EC2ResponseError, IndexError):
bioblend.log.exception("Trouble fetching subnet %s", subnet_id)
cmsg = self.create_cm_security_group(sg, vpc_id=vpc_id)
ret['error'] = cmsg['error']
if ret['error']:
return ret
if cmsg['name']:
ret['sg_names'].append(cmsg['name'])
ret['sg_ids'].append(cmsg['sg_id'])
if subnet_id:
# Must setup a network interface if launching into VPC
security_groups = None
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=subnet_id, groups=[cmsg['sg_id']],
associate_public_ip_address=True)
network_interfaces = (boto.ec2.networkinterface.
NetworkInterfaceCollection(interface))
else:
def __init__(self, url, api_key=None, email=None, password=None, verify=True):
    """
    Wrap a low-level ``GalaxyInstance`` and expose object-oriented
    clients for each API area (histories, libraries, workflows, tools
    and jobs) as attributes of this instance.
    """
    self.gi = bioblend.galaxy.GalaxyInstance(url, api_key, email, password, verify)
    self.log = bioblend.log
    # Attach one object-oriented client per API area, preserving the
    # construction order of the original assignments.
    for attr_name, client_cls in (
            ('histories', client.ObjHistoryClient),
            ('libraries', client.ObjLibraryClient),
            ('workflows', client.ObjWorkflowClient),
            ('tools', client.ObjToolClient),
            ('jobs', client.ObjJobClient)):
        setattr(self, attr_name, client_cls(self))
``contents``. Alternatively, an explicit ``url`` can be provided.
If ``json`` is set to ``True``, return a decoded JSON object
(and treat an empty or undecodable response as an error).
The request will optionally be retried as configured by
``max_get_retries`` and ``get_retry_delay``: this offers some
resilience in the presence of temporary failures.
:return: The decoded response if ``json`` is set to ``True``, otherwise
the response object
"""
if not url:
url = self._make_url(module_id=id, deleted=deleted, contents=contents)
attempts_left = self.max_get_retries()
retry_delay = self.get_retry_delay()
bioblend.log.debug("GET - attempts left: %s; retry delay: %s",
attempts_left, retry_delay)
msg = ''
while attempts_left > 0:
attempts_left -= 1
try:
r = self.gi.make_get_request(url, params=params)
except (requests.exceptions.ConnectionError, ProtocolError) as e:
msg = str(e)
r = requests.Response() # empty Response object used when raising ConnectionError
else:
if r.status_code == 200:
if not json:
return r
elif not r.content:
msg = "GET: empty response"
else: