How to use the retrying.RetryError exception in retrying

To help you get started, we’ve selected a few retrying examples based on popular ways it is used in public projects. Each snippet catches retrying.RetryError, the exception retrying raises when a retried call runs out of attempts without producing an accepted result.

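Before the real-world snippets, here is a minimal, self-contained sketch of the pattern they all share: a helper is wrapped with retrying's @retry decorator, and the caller catches retrying.RetryError once the stop condition (maximum attempts or delay) is exhausted. poll_resource and fetch_resource are illustrative names, not taken from any project below.

import retrying

def fetch_resource():
    """Stand-in for a real lookup; returns None until the resource exists."""
    return None  # never ready in this sketch, so the retry loop will time out

# retrying re-invokes poll_resource while retry_on_result says to; once
# stop_max_delay is exceeded, it raises retrying.RetryError instead of returning.
@retrying.retry(wait_fixed=500,                        # wait 0.5s between attempts
                stop_max_delay=5 * 1000,               # give up after 5 seconds
                retry_on_result=lambda r: r is None)   # retry while result is None
def poll_resource():
    return fetch_resource()

try:
    result = poll_resource()
except retrying.RetryError as ex:
    # RetryError carries the final Attempt; attempt_number says how many tries ran
    print('Gave up after {} attempts'.format(ex.last_attempt.attempt_number))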

github dcos / dcos / packages / dcos-integration-test / extra / test_service_discovery.py
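# NOTE: truncated excerpt; in the original test, _pool_for_mesos_dns carries a
# @retrying.retry decorator that retries while it returns None and raises
# retrying.RetryError once DNS_ENTRY_UPDATE_TIMEOUT elapses.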
def _pool_for_mesos_dns():
    r = dcos_api_session.get('/mesos_dns/v1/services/_{}._tcp.marathon.mesos'.format(
        app_definition['id'].lstrip('/')))
    assert r.status_code == 200

    r_data = r.json()
    if r_data == [{'host': '', 'port': '', 'service': '', 'ip': ''}] or len(r_data) < len(service_points):
        logging.info("Waiting for Mesos-DNS to update entries")
        return None
    else:
        logging.info("Mesos-DNS entries have been updated!")
        return r_data

try:
    r_data = _pool_for_mesos_dns()
except retrying.RetryError:
    msg = "Mesos DNS has failed to update entries in {} seconds."
    pytest.fail(msg.format(DNS_ENTRY_UPDATE_TIMEOUT))

marathon_provided_servicepoints = sorted((x.host, x.port) for x in service_points)
mesosdns_provided_servicepoints = sorted((x['ip'], int(x['port'])) for x in r_data)
assert marathon_provided_servicepoints == mesosdns_provided_servicepoints

# Verify that the containers themselves confirm what Marathon says:
payload = {"reflector_ip": service_points[1].host,
           "reflector_port": service_points[1].port}
r = requests.post('http://{}:{}/your_ip'.format(
    service_points[0].host, service_points[0].port), payload)
if r.status_code != 200:
    msg = "Test server replied with non-200 reply: '{status_code} {reason}'. "
    msg += "Detailed explanation of the problem: {text}"
    pytest.fail(msg.format(status_code=r.status_code, reason=r.reason, text=r.text))

github dcos / dcos-launch / dcos_test_utils / marathon.py
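# NOTE: truncated excerpt from a deploy-pod helper; the first comment line is
# cut mid-sentence, the def line below is reconstructed from the call site, and
# the original decorates _wait_for_pod_deployment with @retrying.retry (retrying
# while it returns False) so that hitting the timeout raises retrying.RetryError.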
def _wait_for_pod_deployment(pod_id):  # signature reconstructed from the call below
    # ... the pod's status to become STABLE is sufficient. In the future,
    # if test pod deployments become more complex, we should switch to
    # using Marathon's event bus and listen for specific events.
    # See DCOS_OSS-1056.
    r = self.get('v2/pods' + pod_id + '::status')
    r.raise_for_status()
    data = r.json()
    if 'status' in data and data['status'] == 'STABLE':
        # deployment complete
        return data
    log.info('Waiting for pod to be deployed %r', data)
    return False

try:
    return _wait_for_pod_deployment(pod_definition['id'])
except retrying.RetryError as ex:
    raise Exception("Pod deployment failed - operation was not "
                    "completed in {} seconds.".format(timeout)) from ex

github dcos / dcos-e2e / src / dcos_e2e / _vendor / dcos_test_utils / jobs.py
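# NOTE: excerpt starts mid-way through _wait_for_run_completion(job_id, run_id)
# (the name comes from the call below); the checks preceding the first line are
# elided, and the original decorates the helper with @retrying.retry so that
# exhausted retries surface as retrying.RetryError.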
            log.info('Job run {} finished.'.format(r_id))
            return True
        else:
            raise requests.HTTPError(
                'Waiting for job run {} to be finished, but history for that job run is not available'
                .format(r_id), response=rc)
    else:
        raise requests.HTTPError(
            'Waiting for job run {} to be finished, but getting HTTP status code {}'
            .format(r_id, rc.status_code), response=rc)

try:
    # Wait for the run to complete, then return the run's result.
    _wait_for_run_completion(job_id, run_id)
except retrying.RetryError as ex:
    raise Exception("Job run failed - operation was not "
                    "completed in {} seconds.".format(timeout)) from ex

github dcos / dcos / test_util / marathon.py
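# NOTE: truncated excerpt; the def line below is reconstructed from the call
# site, and the original decorates _destroy_pod_complete with @retrying.retry
# (retrying while it returns False) so a timeout surfaces as retrying.RetryError.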
def _destroy_pod_complete(deployment_id):  # signature reconstructed from the call below
    r = self.get('v2/deployments')
    assert r.ok, 'status_code: {} content: {}'.format(r.status_code, r.content)

    for deployment in r.json():
        if deployment_id == deployment.get('id'):
            log.info('Waiting for pod to be destroyed')
            return False
    log.info('Pod destroyed')
    return True

r = self.delete('v2/pods' + pod_id)
assert r.ok, 'status_code: {} content: {}'.format(r.status_code, r.content)

try:
    _destroy_pod_complete(r.headers['Marathon-Deployment-Id'])
except retrying.RetryError as ex:
    raise Exception("Pod destroy failed - operation was not "
                    "completed in {} seconds.".format(timeout)) from ex

github openstack / nova-powervm / nova_powervm / virt / powervm / vios.py
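# NOTE: truncated excerpt; the surrounding function presumably initializes
# rmc_down_vioses before the try block below and decorates
# _wait_for_active_vioses with @retrying.retry configured to keep retrying
# while the returned list is non-empty, raising retrying.RetryError once
# max_wait_time is exhausted (inferred from the surrounding code, not shown).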
def _wait_for_active_vioses():
    try:
        # Update the wrappers list and get the list of inactive
        # running VIOSes
        del vios_wraps[:]
        vios_wraps.extend(pvm_vios.VIOS.get(adapter))
        return get_inactive_running_vioses(vios_wraps)
    except Exception as e:
        LOG.exception(e)
        # If we errored then we want to keep retrying so return something
        # with a length greater than zero
        return [None]

try:
    rmc_down_vioses = _wait_for_active_vioses()
except retrying.RetryError:
    # This is thrown if we've hit our max retry count.  If so, no
    # issue... just continue
    pass

if len(rmc_down_vioses) > 0 and rmc_down_vioses != [None]:
    LOG.warning(
        _LW('Timed out waiting for the RMC state of all the powered '
            'on Virtual I/O Servers to be active. Wait time was: '
            '%(time)s seconds. VIOSes that did not go active were: '
            '%(vioses)s.'),
        {'time': max_wait_time,
         'vioses': ', '.join([
             vio.name for vio in rmc_down_vioses if vio is not None])})

# If we didn't get a single active VIOS then raise an exception
if not get_active_vioses(adapter, host_uuid, vios_wraps=vios_wraps):
    # ... (snippet truncated here)

github openstack / ironic / ironic / drivers / modules / ssh.py
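# NOTE: excerpt starts deep inside _with_retries() (the name comes from the
# call below), after elided loops over discovered VMs and their MAC addresses;
# the original decorates the helper with @retrying.retry so a fruitless search
# ends in retrying.RetryError.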
                    if (driver_utils.normalize_mac(host_mac)
                            in driver_utils.normalize_mac(node_mac)):
                        LOG.debug("Found Mac address: %s", node_mac)
                        matched_name = node
                        break

                if matched_name:
                    break
            if matched_name:
                break

        return matched_name

    try:
        return _with_retries()
    except retrying.RetryError:
        raise exception.NodeNotFound(
            _("SSH driver was not able to find a VM with any of the "
              "specified MACs: %(macs)s for node %(node)s.") %
            {'macs': driver_info['macs'], 'node': driver_info['uuid']})

github projecteru / eru-core / eru / helpers / network.py
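# NOTE: _bind_container_ip, defined elsewhere in this module, is presumably
# wrapped with a retry decorator limited to 5 attempts (see the log message),
# which is why repeated failures surface here as retrying.RetryError.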
def bind_container_ip(container, cidrs, spec_ips=None):
    try:
        _bind_container_ip(cidrs, container, spec_ips=spec_ips)
    except retrying.RetryError:
        _log.info('still failed after 5 retries: %s, %s',
                  container.container_id, cidrs)

github openstack / nova-powervm / nova_powervm / virt / powervm / nvram / swift.py
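# NOTE: excerpt starts mid-way through _run_upload_operation (the name comes
# from the call below), inside an elided loop over upload results; the original
# decorates it with @retrying.retry that retries while it returns True (the
# expired-token path), raising retrying.RetryError once attempts run out.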
            # TODO(arun-mani - Bug 1611011): Filed for updating swift
            # client to return http status code in case of failure
            if isinstance(result['error'], swft_exc.ClientException):
                # If upload failed during nvram/slot_map update due to
                # expired keystone token, retry swift-client operation
                # to allow regeneration of token
                LOG.warning('NVRAM upload failed due to invalid '
                            'token. Retrying upload.')
                return True
            # The upload failed.
            raise api.NVRAMUploadException(instance=inst_key,
                                           reason=result)
    return False
try:
    _run_upload_operation()
except retrying.RetryError as re:
    # The upload failed.
    reason = (_('Unable to store NVRAM after %d attempts') %
              re.last_attempt.attempt_number)
    raise api.NVRAMUploadException(instance=inst_key, reason=reason)

github Python3WebSpider / AdslProxy / adslproxy / sender / sender.py
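# NOTE: RetryError is caught by its bare name here, so the module presumably
# imports it directly (from retrying import RetryError), and self.remove_proxy
# is wrapped with a retry decorator upstream.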
def run(self):
    """
    Main dialing process.
    :return: None
    """
    logger.info('Dial started, remove proxy')
    try:
        self.remove_proxy()
    except RetryError:
        logger.error('Retried for max times, continue')
    # Dial
    (status, output) = subprocess.getstatusoutput(DIAL_BASH)
    if status != 0:
        logger.error('Dial failed')
    # Get the newly dialed IP
    ip = self.extract_ip()
    if ip:
        logger.info(f'Get new IP {ip}')
        if PROXY_USERNAME and PROXY_PASSWORD:
            proxy = '{username}:{password}@{ip}:{port}'.format(username=PROXY_USERNAME,
                                                               password=PROXY_PASSWORD,
                                                               ip=ip, port=PROXY_PORT)
        else:
            proxy = '{ip}:{port}'.format(ip=ip, port=PROXY_PORT)
        time.sleep(10)

github boto / botoflow / botoflow / activity_retrying.py
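# NOTE: this excerpt begins inside the file's Apache license header; the module
# re-exports retrying.RetryError via the alias below so botoflow callers can
# catch it without importing retrying themselves.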
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

import sys
from math import ceil

import retrying

from . import workflow_time
from .exceptions import ActivityTaskFailedError
from .core import coroutine, return_

__all__ = ('RetryError', 'Retrying')

RetryError = retrying.RetryError


def retry_on_exception(*args):
    """Takes a list of exceptions to retry on and retries; it will automatically
    unpack ActivityTaskFailedError exceptions.

    :param args: exception classes to retry on
    :type args: Iterable
    :rtype: function
    :return: callable function to be used with retry_on_exception in Retrying
    """
    def _retry_exceptions(exception):

        if isinstance(exception, ActivityTaskFailedError):
            exception = exception.cause

        if isinstance(exception, args):
            # (completion assumed: the snippet is truncated here; a predicate
            # like this returns True on a match and False otherwise)
            return True
        return False

    return _retry_exceptions