def get_task_metrics_from_date(model, date, output_directory):
    options = get_taskcluster_options()

    index = taskcluster.Index(options)
    index.ping()

    # Split the date into its namespace components
    from_date = date.split(".")

    namespaces = []

    # Start at the root level.
    # We need an empty list so that namespace parts can be appended to it.
    namespaces.append([])

    # Recursively list all namespaces greater than or equal to the given date
    while namespaces:
        current_ns = namespaces.pop()
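
The traversal above is cut off in this snippet. As a minimal sketch of the follow-up step, tasks indexed under a resolved namespace could be paged through with Index.listTasks. The helper name and the paging dict are illustrative, not part of the original code; passing the options as a second positional argument mirrors the listNamespaces call in the tc_branches snippet further down, while newer clients take them as query parameters instead.

def list_tasks_in_namespace(index, namespace):
    """Illustrative helper: collect all tasks indexed under one namespace."""
    tasks = []
    continuation_token = None
    while True:
        options = {"limit": 200}
        if continuation_token:
            options["continuationToken"] = continuation_token
        result = index.listTasks(namespace, options)
        tasks.extend(result.get("tasks", []))
        continuation_token = result.get("continuationToken")
        if not continuation_token:
            break
    return tasks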
def query_taskcluster_for_test_packages_url(properties):
    """Return the URL of the test packages JSON file."""
    queue = taskcluster.Queue()

    route = "gecko.v2.{branch}.nightly.revision.{revision}.firefox.{platform}-opt"
    task_id = taskcluster.Index().findTask(route.format(**properties))['taskId']

    # Look for the test_packages.json artifact of the build task and build its URL
    artifacts = queue.listLatestArtifacts(task_id)["artifacts"]
    for artifact in artifacts:
        if artifact['name'].endswith("test_packages.json"):
            return queue.buildUrl('getLatestArtifact', task_id, artifact["name"])

    return None
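
A possible call of the helper above, with placeholder property values (not taken from the original code):

properties = {
    "branch": "mozilla-central",   # placeholder values for illustration only
    "revision": "abcdef123456",
    "platform": "linux64",
}
url = query_taskcluster_for_test_packages_url(properties)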
def find_decision_task_id(trust_domain, project, revision):
    decision_task_route = "{trust_domain}.v2.{project}.revision.{revision}.taskgraph.decision".format(
        trust_domain=trust_domain,
        project=project,
        revision=revision,
    )
    index = taskcluster.Index()

    def _get():
        return index.findTask(decision_task_route)["taskId"]

    # Index lookups can fail transiently, so wrap the call in a retry helper
    return retry(_get)
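
find_decision_task_id relies on a retry() helper that the snippet does not define. A minimal stand-in under that assumption (not the original implementation, which likely comes from a shared utility module) could be:

import time

def retry(func, attempts=5, sleeptime=10):
    """Call func(), retrying a few times with a fixed pause between attempts."""
    for attempt in range(attempts):
        try:
            return func()
        except Exception:
            if attempt == attempts - 1:
                raise
            time.sleep(sleeptime)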
def __init__(self):
    self.now = datetime.datetime.utcnow()
    self.tasks_cache = {}
    self.found_or_created_indexed_tasks = {}
    self.all_tasks = []

    # taskclusterProxy URLs:
    # https://docs.taskcluster.net/docs/reference/workers/docker-worker/docs/features
    self.queue_service = taskcluster.Queue(options={"baseUrl": "http://taskcluster/queue/v1/"})
    self.index_service = taskcluster.Index(options={"baseUrl": "http://taskcluster/index/v1/"})
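
The http://taskcluster/ base URLs above only resolve inside a worker running with the taskclusterProxy feature enabled. Outside of that environment the services have to be addressed directly; a sketch for a current, rootUrl-based client, where the deployment URL and credential values are placeholders:

import taskcluster

ROOT_URL = "https://firefox-ci-tc.services.mozilla.com"  # placeholder deployment

index_service = taskcluster.Index({"rootUrl": ROOT_URL})
queue_service = taskcluster.Queue({
    "rootUrl": ROOT_URL,
    "credentials": {"clientId": "...", "accessToken": "..."},  # placeholders
})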
    Bug 1284236 - Not all Taskcluster builds report correctly to the Index.
    To ensure we get a TC build, force it to Linux64 debug for now.

    :param properties: Properties of the build and necessary resources.
    """
    build_index = 'gecko.v2.{branch}.revision.{rev}.firefox.{platform}-debug'.format(
        branch=properties['branch'],
        rev=properties['revision'],
        platform=properties['platform'],
    )

    try:
        logger.debug('Querying Taskcluster for "desktop-test" docker image for "{}"...'.format(
            properties['branch']))
        build_task_id = taskcluster.Index().findTask(build_index)['taskId']
    except taskcluster.exceptions.TaskclusterFailure:
        raise errors.NotFoundException('Required build not found for TC index', build_index)

    task_id = None
    continuation_token = None
    while not task_id:
        options = {'limit': 5}
        if continuation_token:
            options.update({'continuationToken': continuation_token})
        resp = taskcluster.Queue().listDependentTasks(build_task_id,
                                                      options=options)

        # Pick the dependent task that runs the firefox-ui test suite
        for task in resp['tasks']:
            if task['task'].get('extra', {}).get('suite', {}).get('name') == 'firefox-ui':
                task_id = task['status']['taskId']
                break

        # Advance to the next page of dependent tasks; stop when there is none
        continuation_token = resp.get('continuationToken')
        if not continuation_token:
            break
def tc_branches():
    decision_namespace = 'gecko.v2.%s.latest.firefox.decision'

    index = taskcluster.Index()
    queue = taskcluster.Queue()

    result = index.listNamespaces('gecko.v2', dict(limit=1000))

    branches = {
        i['name']: dict(name=i['name'], workerTypes=dict())
        for i in result.get('namespaces', [])
    }

    for branchName, branch in branches.items():
        # decision task might not exist
        try:
            decision_task = index.findTask(decision_namespace % branchName)
            decision_graph = queue.getLatestArtifact(
                decision_task['taskId'], 'public/graph.json')
        except taskcluster.exceptions.TaskclusterRestFailure:
            # Skip branches that have no indexed decision task
            continue
# https://github.com/taskcluster/taskcluster-client.py/blob/0.0.24/taskcluster/client.py#L30
# This is a stopgap until Bug 1259627 is fixed.
retrying_tc_config = tc_config.copy()
retrying_tc_config.update({"maxRetries": 12})

balrog_username = config['balrog'].get("username")
balrog_password = config["balrog"].get("password")
extra_balrog_submitter_params = config["balrog"].get("extra_balrog_submitter_params", "")
beetmover_aws_access_key_id = config["beetmover"].get("aws_access_key_id")
beetmover_aws_secret_access_key = config["beetmover"].get("aws_secret_access_key")
gpg_key_path = config["signing"].get("gpg_key_path")

# TODO: replace release sanity with direct checks of en-US and l10n
# revisions (and other things if needed)
rr = ReleaseRunner(api_root=api_root, username=username, password=password)
index = Index(tc_config)
queue = Queue(retrying_tc_config)

# Main loop waits for new releases, processes them and exits.
while True:
    try:
        log.debug('Fetching release requests')
        rr.get_release_requests([r['pattern'] for r in config['releases']])
        if rr.new_releases:
            new_releases = run_prebuild_sanity_checks(
                rr, config['releases'])
            break
        else:
            log.debug('Sleeping for %d seconds before polling again' %
                      sleeptime)
            time.sleep(sleeptime)
    except:
        # The handler body is cut off in the original snippet; at minimum,
        # log the failure so the polling loop does not swallow it silently.
        log.error('Caught exception when polling', exc_info=True)
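
tc_config itself is not shown in the snippet above; in real code it would be defined before the Index and Queue are constructed. A plausible shape under that assumption, with config key names that are placeholders rather than values confirmed by the source:

tc_config = {
    "credentials": {
        "clientId": config["taskcluster"].get("client_id"),      # placeholder keys
        "accessToken": config["taskcluster"].get("access_token"),
    },
}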
def _ensure_taskcluster_connection(self, taskcluster_url):
    index = taskcluster.Index(options={'baseUrl': taskcluster_url})
    # This will raise a subclass of TaskclusterFailure if things go wrong.
    index.ping()
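
A standalone sketch of the same health check, using a placeholder index URL for the pre-rootUrl client style that the snippet targets:

import taskcluster

index = taskcluster.Index(options={'baseUrl': 'https://index.taskcluster.net/v1'})  # placeholder URL
# ping() raises a TaskclusterFailure subclass when the service cannot be reached
index.ping()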