def test_initialization(self):
    """Test whether attributes are initialized"""
    client = MockedClient(CLIENT_API_URL)

    self.assertEqual(client.base_url, CLIENT_API_URL)
    self.assertEqual(client.max_retries, HttpClient.MAX_RETRIES)
    self.assertEqual(client.max_retries_on_connect, HttpClient.MAX_RETRIES_ON_CONNECT)
    self.assertEqual(client.max_retries_on_read, HttpClient.MAX_RETRIES_ON_READ)
    self.assertEqual(client.max_retries_on_redirect, HttpClient.MAX_RETRIES_ON_REDIRECT)
    self.assertEqual(client.max_retries_on_status, HttpClient.MAX_RETRIES_ON_STATUS)
    self.assertEqual(client.status_forcelist, HttpClient.DEFAULT_STATUS_FORCE_LIST)
    self.assertEqual(client.retry_after_status, HttpClient.DEFAULT_RETRY_AFTER_STATUS_CODES)
    self.assertEqual(client.method_whitelist, HttpClient.DEFAULT_METHOD_WHITELIST)
    self.assertEqual(client.raise_on_redirect, HttpClient.DEFAULT_RAISE_ON_REDIRECT)
    self.assertEqual(client.raise_on_status, HttpClient.DEFAULT_RAISE_ON_STATUS)
    self.assertEqual(client.respect_retry_after_header, HttpClient.DEFAULT_RESPECT_RETRY_AFTER_HEADER)
    self.assertEqual(client.sleep_time, HttpClient.DEFAULT_SLEEP_TIME)
    self.assertIsNotNone(client.session)
    self.assertEqual(client.session.headers['User-Agent'], HttpClient.DEFAULT_HEADERS.get('User-Agent'))
    self.assertIsNone(client.rate_limit)
    self.assertIsNone(client.rate_limit_reset_ts)

    expected_retries = 5
    expected_sleep_time = 100
    expected_headers = {'User-Agent': 'ACME Corp.', 'Token': "your-token"}
    extra_status = 555
    # The original snippet is cut here; the keyword arguments below are
    # inferred from the variables defined just above.
    client = MockedClient(CLIENT_API_URL,
                          max_retries=expected_retries,
                          sleep_time=expected_sleep_time,
                          extra_headers=expected_headers,
                          extra_status_forcelist=[extra_status])
def test_fetch_post(self):
    """Test fetch with the POST method"""
    output = "success"

    httpretty.register_uri(httpretty.POST,
                           CLIENT_SPIDERMAN_URL,
                           body=output,
                           status=200)

    client = MockedClient(CLIENT_API_URL, sleep_time=0.1, max_retries=1)
    response = client.fetch(CLIENT_SPIDERMAN_URL, method=HttpClient.POST)

    self.assertEqual(response.request.method, HttpClient.POST)
    self.assertEqual(response.text, output)
def fetch(self, url, payload=None, headers=None,
          method=HttpClient.GET, stream=False, verify=True):
    """Override fetch method to handle the API rate limit.

    :param url: link to the resource
    :param payload: payload of the request
    :param headers: headers of the request
    :param method: type of request call (GET or POST)
    :param stream: defer downloading the response body until the response
        content is available
    :param verify: verify the server's TLS certificate

    :returns: a response object
    """
    if not self.from_archive:
        self.sleep_for_rate_limit()

    response = super().fetch(url, payload, headers, method, stream, verify)

    # The original snippet is cut here; refreshing the stored rate-limit
    # data before returning is the natural completion (the helper name
    # `update_rate_limit` is assumed).
    if not self.from_archive:
        self.update_rate_limit(response)

    return response
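# The override above waits on the API quota before each live request. A
# minimal sketch of that wait, assuming the client tracks the remaining
# quota in `rate_limit`, the reset time in `rate_limit_reset_ts` (a Unix
# timestamp) and a threshold in `min_rate_to_sleep`; these names are
# illustrative, not necessarily the library's exact attributes.
import logging
import time

logger = logging.getLogger(__name__)


class RateLimitSleepSketch:

    def sleep_for_rate_limit(self):
        """Sleep until the rate limit resets when the quota runs low."""
        if self.rate_limit is None or self.rate_limit > self.min_rate_to_sleep:
            return

        seconds_to_reset = self.rate_limit_reset_ts - int(time.time()) + 1
        if seconds_to_reset > 0:
            logger.debug("Rate limit low; sleeping %s seconds", seconds_to_reset)
            time.sleep(seconds_to_reset)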
@staticmethod
def __parse_attachments(raw_attachments):
    """Parse the attachments from a raw Bugzilla REST API response."""
    contents = json.loads(raw_attachments)['bugs']
    attachments = {k: v for k, v in contents.items()}
    return attachments
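# A quick check of the parsing step above, with a payload shaped like a
# Bugzilla REST attachments response (bug id and field values made up):
import json

raw = '{"bugs": {"1234": [{"file_name": "patch.diff", "size": 10}]}}'
contents = json.loads(raw)['bugs']
assert {k: v for k, v in contents.items()} == \
    {'1234': [{'file_name': 'patch.diff', 'size': 10}]}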
class BugzillaRESTError(BaseError):
    """Raised when an error occurs using the API"""

    message = "%(error)s (code: %(code)s)"
class BugzillaRESTClient(HttpClient):
    """Bugzilla REST API client.

    This class implements a simple client to retrieve distinct
    kinds of data from a Bugzilla (> 5.0) repository using its
    REST API.

    When `user` and `password` parameters are given, it logs into
    the server. Further requests will use the token obtained
    during the sign-in phase.

    :param base_url: URL of the Bugzilla server
    :param user: Bugzilla user
    :param password: user password
    :param api_token: API token for the user; when this is provided,
        the `user` and `password` parameters are ignored
    :param archive: an archive to store/read fetched data
    """
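# A hedged usage sketch for the client above; the server URL and token
# are placeholders. The constructor arguments follow the docstring.
client = BugzillaRESTClient('https://bugzilla.example.com',
                            api_token='abcd1234')
# With api_token given, user/password are ignored and no sign-in
# request is made; the token is attached to subsequent API calls.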
builds = builds.get('builds', [])

if not builds:
    self.summary.skipped += 1
    logger.debug("No builds for job %s", job['url'])

return builds

def _init_client(self, from_archive=False):
    """Init client"""
    return JenkinsClient(self.url, self.user, self.api_token,
                         self.blacklist_ids, self.detail_depth, self.sleep_time,
                         archive=self.archive, from_archive=from_archive)
class JenkinsClient(HttpClient):
    """Jenkins API client.

    This class implements a simple client to retrieve jobs/builds from
    projects in a Jenkins node. The amount of data returned for each
    request depends on the `detail_depth` value selected (minimum and
    default is 1). Note that increasing `detail_depth` may considerably
    slow down the fetch operation and cause broken connection errors.

    :param url: URL of the Jenkins node: https://build.opnfv.org/ci
    :param user: Jenkins user
    :param api_token: Jenkins auth token to access the API
    :param blacklist_jobs: exclude the jobs in this list while fetching
    :param detail_depth: set the detail level of the data returned by the API
    :param sleep_time: time (in seconds) to sleep in case
        of connection problems
    :param archive: an archive to store/read fetched data
    """
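# Sketch of how detail_depth typically reaches Jenkins: the standard
# JSON endpoint accepts a `depth` query parameter controlling how much
# nested data each response carries. The method below is an assumption
# for illustration, not the client's real interface.
def get_jobs(self):
    """Retrieve all jobs, expanded to the configured detail depth."""
    url_jobs = urijoin(self.base_url, 'api', 'json')
    payload = {'depth': self.detail_depth}  # e.g. api/json?depth=2
    response = self.fetch(url_jobs, payload=payload)
    return response.text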
@staticmethod
def parse_json(raw_json):
    """Parse a JSON stream.

    The method parses the given stream and returns a
    dict with the parsed data.

    :param raw_json: JSON string to parse

    :returns: a dict with the parsed data
    """
    result = json.loads(raw_json)
    return result
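# A quick usage check of the parser above (fields made up; the method is
# called unqualified here for brevity):
assert parse_json('{"count": 2, "results": []}') == {'count': 2, 'results': []}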
def _init_client(self, from_archive=False):
    """Init client"""
    return DockerHubClient(archive=self.archive, from_archive=from_archive)
class DockerHubClient(HttpClient):
    """DockerHub API client.

    Client for fetching information from the DockerHub server
    using its REST API v2.

    :param archive: an archive to store/read fetched data
    :param from_archive: it tells whether to write/read the archive
    """
    RREPOSITORY = 'repositories'  # API resource for repository endpoints

    def __init__(self, archive=None, from_archive=False):
        super().__init__(DOCKERHUB_API_URL, archive=archive, from_archive=from_archive)

    def repository(self, owner, repository):
        """Fetch information about a repository."""
        # The original snippet is cut here; building the resource path
        # and fetching it is the natural completion, though the real
        # client's response handling may differ.
        resource = urijoin(self.RREPOSITORY, owner, repository)
        response = self.fetch(urijoin(self.base_url, resource))
        return response.text
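# A hedged usage sketch for the client above; the owner and repository
# names are placeholders, and the comment assumes DOCKERHUB_API_URL
# points at the v2 endpoint.
client = DockerHubClient()
raw_repo = client.repository('grimoirelab', 'perceval')
# raw_repo now holds the JSON text from
# <DOCKERHUB_API_URL>/repositories/grimoirelab/perceval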
if not hit_string:
    logger.warning("No hits for %s", self.keywords)
    hits_json['hits'] = 0
    return hits_json

str_hits = re.search(r'\d+', hit_string).group(0)
hits = int(str_hits)
hits_json['hits'] = hits

return hits_json
class GoogleHitsClient(HttpClient):
    """GoogleHits API client.

    Client for fetching hits data from the Google API.

    :param sleep_time: time (in seconds) to sleep in case
        of connection problems
    :param max_retries: number of max retries to a data source
        before raising a RetryError exception
    :param archive: an archive to store/read fetched data
    :param from_archive: it tells whether to write/read the archive
    """
    EXTRA_STATUS_FORCELIST = [429]

    def __init__(self, sleep_time=DEFAULT_SLEEP_TIME, max_retries=MAX_RETRIES,
                 archive=None, from_archive=False):
        # The original snippet is cut mid-call; forwarding the remaining
        # constructor arguments is the natural completion.
        super().__init__(GOOGLE_SEARCH_URL, extra_status_forcelist=self.EXTRA_STATUS_FORCELIST,
                         sleep_time=sleep_time, max_retries=max_retries,
                         archive=archive, from_archive=from_archive)
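# Hedged usage sketch: HTTP 429 ("Too Many Requests") is added to the
# retry forcelist above, so throttled responses are retried instead of
# failing at once. The query parameter below follows Google's public
# search URL format and is an illustration, not the client's real call.
client = GoogleHitsClient(sleep_time=0.5, max_retries=3)
response = client.fetch(GOOGLE_SEARCH_URL, payload={'q': 'grimoirelab perceval'})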
page['update'] = None

if str(page['pageid']) in reviews['query']['pages']:
    reviews_json = reviews['query']['pages'][str(page['pageid'])]
    if 'revisions' in reviews_json:
        page['revisions'] = reviews_json['revisions']
        page['update'] = self.__get_max_date(page['revisions'])
    else:
        page = None
else:
    logger.warning("Revisions not found in %s [page id: %s], page skipped",
                   page['title'], page['pageid'])
    page = None

return page
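# The code above relies on a private helper that picks the most recent
# revision date. A minimal sketch, assuming each revision carries a
# 'timestamp' field in ISO 8601 form (MediaWiki's usual encoding), where
# lexicographic order matches chronological order:
@staticmethod
def __get_max_date(revisions):
    """Return the latest timestamp among the given revisions."""
    return max(revision['timestamp'] for revision in revisions)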
class MediaWikiClient(HttpClient):
    """MediaWiki API client.

    This class implements a simple client to retrieve pages from
    projects in a MediaWiki node.

    :param url: URL of the MediaWiki site: https://wiki.mozilla.org
    :param archive: an archive to store/read the fetched data
    :param from_archive: defines whether the archive is used to store/read data

    :raises HTTPError: when an error occurs performing the request
    """
    def __init__(self, url, archive=None, from_archive=False):
        super().__init__(urijoin(url, "api.php"), archive=archive, from_archive=from_archive)
        self.limit = "max"  # Always get the max number of items
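# Hedged usage sketch: the client targets the standard api.php endpoint,
# so a pages request is a plain MediaWiki API query. The parameters
# below follow the public MediaWiki API; the client's own helper methods
# may wrap this differently.
client = MediaWikiClient('https://wiki.mozilla.org')
params = {
    'action': 'query',        # MediaWiki query module
    'format': 'json',
    'list': 'allpages',
    'aplimit': client.limit,  # "max": as many items as the API allows
}
response = client.fetch(client.base_url, payload=params)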