# Throw errors for non-retryable (non-2xx) responses; `status` and `data`
# come from the HTTP response handled just above this fragment
if status < 200 or status >= 300:
    # Find the error message in the response body
    if isinstance(data, dict):
        message = data.get('message')
    else:
        if status == 401:
            message = "Authentication Error"
        elif status == 500:
            message = "Internal Server Error"
    # Raise TaskclusterAuthFailure if this is an auth issue
    if status == 401:
        raise exceptions.TaskclusterAuthFailure(
            message,
            status_code=status,
            body=data,
            superExc=None
        )
    # Raise TaskclusterRestFailure for all other issues
    raise exceptions.TaskclusterRestFailure(
        message,
        status_code=status,
        body=data,
        superExc=None
    )

# Try to load JSON
try:
    return response.json()
except ValueError:
    return {"response": response}

# This code-path should be unreachable
assert False, "Error from last retry should have been raised!"
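# A minimal sketch (not taken from the snippet above) of catching these
# exceptions from calling code, assuming a current taskcluster Python client
# where Queue takes an options dict with a rootUrl. The root URL and task ID
# below are placeholders.
import taskcluster
from taskcluster import exceptions

queue = taskcluster.Queue({"rootUrl": "https://firefox-ci-tc.services.mozilla.com"})

try:
    status = queue.status("a-task-id")  # hypothetical task ID
except exceptions.TaskclusterAuthFailure as e:
    # 401 responses from the block above end up here
    print("Auth failure:", e, getattr(e, "status_code", None))
except exceptions.TaskclusterRestFailure as e:
    # Any other non-2xx response; body carries the parsed error payload when available
    print("REST failure:", getattr(e, "status_code", None), getattr(e, "body", None))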
# Schedule the new (retriggered) task with a 24 hour deadline
try:
    tomorrow = now + datetime.timedelta(hours=24)
    task['created'] = taskcluster_client.stringDate(now)
    task['deadline'] = taskcluster_client.stringDate(tomorrow)
    LOG.debug("Contents of new task: (Limit 1024 char)")
    LOG.debug(str(task)[:1024])
    if not dry_run:
        LOG.info("Attempting to schedule new task with task_id: {}".format(new_task_id))
        result = queue.createTask(new_task_id, task)
        LOG.debug(json.dumps(result))
        LOG.info("{}/task-inspector/#{}".format(TASKCLUSTER_TOOLS_HOST, new_task_id))
    else:
        LOG.info("Dry-run mode: Nothing was retriggered.")
except taskcluster_client.exceptions.TaskclusterRestFailure:
    traceback.print_exc()
    new_task_id = -1
except taskcluster_client.exceptions.TaskclusterAuthFailure as e:
    # Hack until we fix it in the issue
    if str(e) == "Authorization Failed":
        LOG.error("The taskcluster client that you specified is lacking "
                  "the right set of scopes.")
        LOG.error("Run this same command with --debug and you will see "
                  "the missing scopes (the output comes from the "
                  "taskcluster python client)")
    elif str(e) == "Authentication Error":
        LOG.error("Make sure that you create permanent credentials and you "
                  "set these environment variables: TASKCLUSTER_CLIENT_ID & "
                  "TASKCLUSTER_ACCESS_TOKEN")
    new_task_id = -1
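# A hedged sketch of setting up credentials the way the error message above
# suggests: read the TASKCLUSTER_* variables from the environment.
# `optionsFromEnvironment` is assumed to be available (it is in recent
# taskcluster clients); the manual fallback dict and the root URL are assumptions.
import os
import taskcluster

try:
    options = taskcluster.optionsFromEnvironment()
except AttributeError:
    options = {
        "rootUrl": os.environ.get("TASKCLUSTER_ROOT_URL",
                                  "https://firefox-ci-tc.services.mozilla.com"),
        "credentials": {
            "clientId": os.environ["TASKCLUSTER_CLIENT_ID"],
            "accessToken": os.environ["TASKCLUSTER_ACCESS_TOKEN"],
        },
    }

queue = taskcluster.Queue(options)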
try:
    while True:
        retry += 1
        try:
            # Perform one HTTP attempt (the actual request call is elided in this fragment)
            response = await makeSingleHttpRequest(method, url, payload, headers, session)
        except aiohttp.ClientError as rerr:
            if retry < retries:
                log.warn('Retrying because of: %s' % rerr)
                continue
            # raise a connection exception
            raise rerr
        except ValueError as rerr:
            log.warn('ValueError from aiohttp: redirect to non-http or https')
            raise rerr
        except RuntimeError as rerr:
            log.warn('RuntimeError from aiohttp: session closed')
            raise rerr
        # Handle non 2xx status code and retry if possible
        status = response.status
        if 500 <= status and status < 600 and retry < retries:
            if retry < retries:
                log.warn('Retrying because of: %d status' % status)
                continue
            else:
                raise exceptions.TaskclusterRestFailure("Unknown Server Error", superExc=None)
        return response
finally:
    cleanup()

# This code-path should be unreachable
assert False, "Error from last retry should have been raised!"
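# A minimal sketch of driving the async retry loop above from calling code,
# assuming the asynchronous clients live under taskcluster.aio and accept an
# existing aiohttp session. The root URL and task ID are placeholders.
import asyncio
import aiohttp
import taskcluster.aio
from taskcluster import exceptions

async def fetch_status(task_id):
    async with aiohttp.ClientSession() as session:
        queue = taskcluster.aio.Queue(
            {"rootUrl": "https://firefox-ci-tc.services.mozilla.com"},
            session=session,
        )
        try:
            return await queue.status(task_id)
        except exceptions.TaskclusterRestFailure as e:
            # Raised once the retries above are exhausted
            print("Request failed, status:", getattr(e, "status_code", None))
            return None

asyncio.run(fetch_status("a-task-id"))  # hypothetical task ID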
for task in tasks:
    # The original filter condition before this `continue` is not part of the
    # fragment; skipping entries without a taskId is an assumption.
    if "taskId" not in task:
        continue

    # Lookup artifact in cache
    path = os.path.join(self.cache_dir, task["taskId"])
    if os.path.exists(path):
        artifact = json.load(open(path))
    else:
        # Download the task report
        logging.info(f"Download task {task['taskId']}")
        try:
            artifact = queue.getLatestArtifact(
                task["taskId"], "public/results/report.json"
            )
        except taskcluster.exceptions.TaskclusterRestFailure as e:
            if e.status_code == 404:
                logging.info("Missing artifact")
                continue
            raise

        # Check the artifact has repositories & revision
        revision = artifact["revision"]
        assert "repository" in revision, "Missing repository"
        assert "target_repository" in revision, "Missing target_repository"
        assert "mercurial_revision" in revision, "Missing mercurial_revision"

        # Store artifact in cache
        with open(path, "w") as f:
            json.dump(artifact, f, sort_keys=True, indent=4)
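# The 404 handling above can be isolated into a small helper: return None when
# the artifact does not exist and re-raise everything else. A hedged sketch;
# only the queue methods already used in the snippet are assumed.
import taskcluster

def get_report(queue, task_id, name="public/results/report.json"):
    try:
        return queue.getLatestArtifact(task_id, name)
    except taskcluster.exceptions.TaskclusterRestFailure as e:
        if getattr(e, "status_code", None) == 404:
            return None  # missing artifact, treat as "no report"
        raise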
def _fetch_completed_tasks(self):
    log.debug('Release "%s" still has to find tasks for %s',
              self.release_name, self.platforms_to_check)
    for platform in list(self.platforms_to_check):
        try:
            if self._all_tasks_ready(platform):
                log.info("All tasks for %s are ready", platform)
                self.platforms_to_check.remove(platform)
        except TaskclusterRestFailure:
            log.debug('Task for platform "%s" is not yet created for '
                      'release "%s"', platform, self.release_name)
result = index.listNamespaces('gecko.v2', dict(limit=1000))
branches = {
    i['name']: dict(name=i['name'], workerTypes=dict())
    for i in result.get('namespaces', [])
}
for branchName, branch in branches.items():
    # decision task might not exist
    try:
        decision_task = index.findTask(decision_namespace % branchName)
        decision_graph = queue.getLatestArtifact(
            decision_task['taskId'], 'public/graph.json')
    except taskcluster.exceptions.TaskclusterRestFailure:
        continue
    for task in decision_graph.get('tasks', []):
        task = task['task']
        task_cache = task.get('payload', dict()).get('cache', dict())
        provisionerId = task.get('provisionerId')
        if provisionerId:
            branch['provisionerId'] = provisionerId
        workerType = task.get('workerType')
        if workerType:
            branch['workerTypes'].setdefault(
                workerType, dict(name=workerType, caches=[]))
        if len(task_cache) > 0: