def _extend_volume(self, new_size):
    try:
        self.client().volumes.extend(self.resource_id, new_size)
    except Exception as ex:
        if self.client_plugin().is_client_exception(ex):
            raise exception.Error(_(
                "Failed to extend volume %(vol)s - %(err)s") % {
                    'vol': self.resource_id, 'err': str(ex)})
        else:
            raise
    return True
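
The pattern above (translate client-library failures into the application's own error type, re-raise everything else) generalizes beyond Heat. A minimal self-contained sketch, with `is_client_exception` replaced by a caller-supplied predicate; all names here are illustrative, not from the source:

class DomainError(Exception):
    """Application-level error raised in place of client-library errors."""

def extend_volume(client, volume_id, new_size, is_client_exception):
    # is_client_exception is a hypothetical predicate that decides
    # whether an exception originated in the volume client.
    try:
        client.volumes.extend(volume_id, new_size)
    except Exception as ex:
        if is_client_exception(ex):
            # Wrap client errors so callers only handle DomainError.
            raise DomainError(
                "Failed to extend volume %s - %s" % (volume_id, ex))
        # Anything else (bugs, interrupts) propagates unchanged.
        raise
    return True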
# Excerpt from a signal-collecting method; the surrounding loop is
# reconstructed here. `container_objects` is a stand-in name for the
# listing of the stack's Swift container (truncated in the original).
obj_bodies = []
for obj in container_objects:
    try:
        signal = self.client().get_object(self.stack.id, obj['name'])
    except Exception as exc:
        self.client_plugin().ignore_not_found(exc)
        continue

    body = signal[1]
    if body == swift.IN_PROGRESS:  # Ignore the initial object
        continue
    if body == "":
        obj_bodies.append({})
        continue
    try:
        obj_bodies.append(jsonutils.loads(body))
    except ValueError:
        raise exception.Error(_("Failed to parse JSON data: %s") % body)

# Set default values on each signal
signals = []
signal_num = 1  # consumed further down in the original method (truncated)
for signal in obj_bodies:
    # Remove previous signals with the same ID
    sig_id = self.UNIQUE_ID
    ids = [s.get(sig_id) for s in signals if sig_id in s]
    if ids and sig_id in signal and ids.count(signal[sig_id]) > 0:
        # Rebuild the list instead of calling remove() while iterating
        # over it, which can skip elements.
        signals[:] = [s for s in signals
                      if s.get(sig_id) != signal[sig_id]]

    # Make sure all fields are set, since all are optional
    signal.setdefault(self.DATA, None)
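
The dedup step above keeps only the most recent signal for each unique ID. The same logic in a self-contained form (the 'id' key is illustrative):

def dedup_signals(bodies, sig_id='id'):
    """Keep only the last occurrence of each signal ID; later wins."""
    signals = []
    for signal in bodies:
        if sig_id in signal:
            # Drop any earlier signal that reused this ID.
            signals = [s for s in signals if s.get(sig_id) != signal[sig_id]]
        signals.append(signal)
    return signals

# Example: the second signal with id=1 replaces the first.
print(dedup_signals([{'id': 1, 'data': 'a'},
                     {'id': 2, 'data': 'b'},
                     {'id': 1, 'data': 'c'}]))
# -> [{'id': 2, 'data': 'b'}, {'id': 1, 'data': 'c'}]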
def _check_complete(self):
    sd = self.rpc_client().show_software_deployment(
        self.context, self.resource_id)
    status = sd[rpc_api.SOFTWARE_DEPLOYMENT_STATUS]
    if status == SoftwareDeployment.COMPLETE:
        return True
    elif status == SoftwareDeployment.FAILED:
        status_reason = sd[rpc_api.SOFTWARE_DEPLOYMENT_STATUS_REASON]
        message = _("Deployment to server failed: %s") % status_reason
        LOG.info(message)
        raise exception.Error(message)
    # Any other status falls through (returns None): still in progress.
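
Check methods like this are meant to be polled until they return a truthy value or raise. A minimal driver loop, assuming a `check()` callable with the same contract (this driver is a sketch, not Heat's scheduler):

import time

def wait_for(check, poll_interval=1.0, timeout=300.0):
    """Poll `check` until it returns truthy, raising on timeout.

    `check` follows the contract above: truthy means complete, falsy
    means still in progress, and failures surface as exceptions.
    """
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if check():
            return
        time.sleep(poll_interval)
    raise TimeoutError("operation did not complete in %.0fs" % timeout)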
def handle_suspend(self):
    """Suspend a server.

    Note we do not wait for the SUSPENDED state; this is polled for by
    check_suspend_complete in a similar way to the create logic, so we
    can take advantage of coroutines.
    """
    if self.resource_id is None:
        raise exception.Error(_('Cannot suspend %s, resource_id not set') %
                              self.name)

    try:
        server = self.client().servers.get(self.resource_id)
    except Exception as e:
        if self.client_plugin().is_not_found(e):
            raise exception.NotFound(_('Failed to find server %s') %
                                     self.resource_id)
        else:
            raise
    else:
        # If the server has already been suspended successfully,
        # there is no need to suspend it again.
        if self.client_plugin().get_status(server) != 'SUSPENDED':
            LOG.debug('suspending server %s', self.resource_id)
            server.suspend()
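
Both handle_suspend here and handle_resume further down follow the same idempotent pattern: read the current status and only issue the transition if the resource is not already in the target state. A generic sketch (names illustrative, not from the source):

def ensure_state(get_status, target_status, action):
    """Invoke `action` only when the resource is not already in
    `target_status`. Safe to call repeatedly: retries do not issue
    duplicate transition requests.
    """
    if get_status() != target_status:
        action()

# Usage sketch:
# ensure_state(lambda: plugin.get_status(server), 'SUSPENDED', server.suspend)
# ensure_state(lambda: plugin.get_status(server), 'ACTIVE', server.resume)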
def handle_create(self):
    router_id = self.properties.get(self.ROUTER_ID)
    routes = self.client().show_router(
        router_id).get('router').get('routes')
    if not routes:
        routes = []
    new_route = {'destination': self.properties[self.DESTINATION],
                 'nexthop': self.properties[self.NEXTHOP]}
    if new_route in routes:
        msg = _('Route duplicates an existing route.')
        raise exception.Error(msg)

    routes.append(new_route.copy())
    self.client().update_router(router_id,
                                {'router': {'routes': routes}})
    new_route['router_id'] = router_id
    self.resource_id_set(
        '%(router_id)s:%(destination)s:%(nexthop)s' % new_route)
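
The resource ID packs three values into one colon-delimited string. A hypothetical helper for the reverse operation (not from the source); note that the destination and nexthop may themselves contain colons when they are IPv6 values, a known weakness of colon-delimited composite IDs:

def parse_route_id(resource_id):
    """Split 'router_id:destination:nexthop' back into its parts.

    The router_id (a UUID) never contains ':', so split it off first;
    the remainder is split at its last ':' and is only unambiguous for
    IPv4 values.
    """
    router_id, rest = resource_id.split(':', 1)
    destination, _, nexthop = rest.rpartition(':')
    return router_id, destination, nexthop

# parse_route_id('abc123:10.0.0.0/24:10.0.0.1')
# -> ('abc123', '10.0.0.0/24', '10.0.0.1')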
def stack_domain_user_token(self, user_id, project_id, password):
    """Get a token for a stack domain user."""
    if not self.stack_domain:
        # Note, no legacy fallback path as we don't want to deploy
        # tokens for non stack-domain users inside instances
        msg = _('Cannot get stack domain user token, no stack domain id '
                'configured, please fix your heat.conf')
        raise exception.Error(msg)

    # Create a keystone session, then request a token with no
    # catalog (the token is expected to be used inside an instance
    # where a specific endpoint will be specified, and user-data
    # space is limited).
    # TODO(rabi): generic auth plugins don't support the
    # `include_catalog' flag yet. We'll add it once it's supported.
    auth = ks_auth.Password(auth_url=self.v3_endpoint,
                            user_id=user_id,
                            password=password,
                            project_id=project_id)

    return auth.get_token(self.session)
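
For reference, the same password plugin works standalone with keystoneauth1. A minimal sketch assuming a reachable Keystone v3 endpoint; every value below is illustrative:

from keystoneauth1 import session
from keystoneauth1.identity import v3

# Illustrative endpoint and credentials; substitute real ones.
auth = v3.Password(auth_url='http://keystone:5000/v3',
                   user_id='some-user-id',
                   password='secret',
                   project_id='some-project-id')
sess = session.Session(auth=auth)
token = sess.get_token()  # equivalently: auth.get_token(sess), as above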
def __call__(self):
    LOG.debug(str(self))
    cinder = self.clients.client('cinder').volumes
    vol = cinder.get(self.volume_id)

    try:
        cinder.extend(self.volume_id, self.size)
    except Exception as ex:
        if self.clients.client_plugin('cinder').is_client_exception(ex):
            raise exception.Error(_(
                "Failed to extend volume %(vol)s - %(err)s") % {
                    'vol': vol.id, 'err': str(ex)})
        else:
            raise

    yield

    vol.get()
    while vol.status == 'extending':
        LOG.debug("Volume %s is being extended", self.volume_id)
        yield
        vol.get()

    if vol.status != 'available':
        LOG.info(_LI("Resize failed: Volume %(vol)s is in %(status)s "
                     "state."), {'vol': vol.id, 'status': vol.status})
"rax_service_level_automation metadata tag yet")
return False
mc_status = server.metadata['rax_service_level_automation']
LOG.debug("Rackspace Cloud automation status: %s" % mc_status)
if mc_status == self.SM_STATUS_IN_PROGRESS:
return False
elif mc_status == self.SM_STATUS_COMPLETE:
msg = _("Rackspace Cloud automation has completed")
self._add_event(self.action, self.status, msg)
return True
elif mc_status == self.SM_STATUS_BUILD_ERROR:
raise exception.Error(_("Rackspace Cloud automation failed"))
else:
raise exception.Error(_("Unknown Rackspace Cloud automation "
"status: %s") % mc_status)
# Excerpt: endpoint discovery from the Cinder client plugin; prefer the
# v3 volume API and fall back to v2 before giving up.
self.interface = self._get_client_option(CLIENT_NAME, 'endpoint_type')
try:
    self.context.keystone_session.get_endpoint(
        service_type=self.VOLUME_V3,
        interface=self.interface)
    self.service_type = self.VOLUME_V3
    self.client_version = '3'
except ks_exceptions.EndpointNotFound:
    try:
        self.context.keystone_session.get_endpoint(
            service_type=self.VOLUME_V2,
            interface=self.interface)
        self.service_type = self.VOLUME_V2
        self.client_version = '2'
    except ks_exceptions.EndpointNotFound:
        raise exception.Error(_('No volume service available.'))
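
Nested try/except blocks scale poorly past two versions; a loop over candidates expresses the same fallback chain. A sketch with a hypothetical find_endpoint callable standing in for the session lookup:

def pick_volume_service(find_endpoint, candidates):
    """Return the first (service_type, version) whose endpoint exists.

    `find_endpoint` is a hypothetical callable that raises LookupError
    when no endpoint is registered for the given service type.
    """
    for service_type, version in candidates:
        try:
            find_endpoint(service_type)
            return service_type, version
        except LookupError:
            continue
    raise RuntimeError('No volume service available.')

# Usage sketch:
# pick_volume_service(lookup, [('volumev3', '3'), ('volumev2', '2')])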
def handle_resume(self):
    """Resume an instance.

    Note we do not wait for the ACTIVE state; this is polled for by
    check_resume_complete in a similar way to the create logic, so we
    can take advantage of coroutines.
    """
    if self.resource_id is None:
        raise exception.Error(_('Cannot resume %s, resource_id not set') %
                              self.name)

    try:
        server = self.client().servers.get(self.resource_id)
    except Exception as e:
        if self.client_plugin().is_not_found(e):
            raise exception.NotFound(_('Failed to find instance %s') %
                                     self.resource_id)
        else:
            raise
    else:
        # If the instance has already been resumed successfully,
        # there is no need to resume it again.
        if self.client_plugin().get_status(server) != 'ACTIVE':
            LOG.debug("resuming instance %s", self.resource_id)
            server.resume()