# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
side_effect=ConnectionError("Couldn't establish "
"Galaxy connection"))
@mock.patch.object(run_analysis, "retry", side_effect=None)
def test__check_galaxy_history_state_with_connection_error(
        self,
        retry_mock,
        galaxy_progress_mock
):
    """Galaxy history state falls back to UNKNOWN when the progress
    check raises a ConnectionError (supplied by the mock above).
    """
    # NOTE(review): the decorator that provides `galaxy_progress_mock`
    # (a mock.patch with side_effect=ConnectionError) begins above this
    # chunk and is only partially visible.
    _check_galaxy_history_state(self.analysis.uuid)
    # Fetch analysis status since it has changed during
    # the course of this test and the old `self` reference is stale
    analysis_status = AnalysisStatus.objects.get(analysis=self.analysis)
    self.assertEqual(analysis_status.galaxy_history_state,
                     AnalysisStatus.UNKNOWN)
def _get_galaxy_download_task_ids(analysis):
    """Get file import tasks for Galaxy analysis results"""
    logger.debug("Preparing to download analysis results from Galaxy")
    task_id_list = []
    # retrieving list of files to download for workflow
    tool = _get_workflow_tool(analysis.uuid)
    try:
        download_list = tool.create_analysis_output_node_connections()
    except galaxy.client.ConnectionError as exc:
        error_msg = \
            "Error downloading Galaxy history files for analysis '%s': %s"
        logger.error(error_msg, analysis.name, exc.message)
        # NOTE(review): `error_msg` is stored unformatted here, so the
        # saved status will contain literal '%s' placeholders; this likely
        # should be `error_msg % (analysis.name, exc.message)` — confirm.
        analysis.set_status(Analysis.FAILURE_STATUS, error_msg)
        analysis.galaxy_cleanup()
        # On failure: return the (empty) task list after cleanup
        return task_id_list
    galaxy_instance = analysis.workflow.workflow_engine.instance
    # Iterating through files in current galaxy history
    for results in download_list:
        # download file if result state is "ok"
        if results['state'] == 'ok':
            file_extension = results['file_ext']
            # size of file defined by galaxy
            file_size = results['file_size']
            # NOTE(review): snippet is truncated mid-expression below; the
            # remainder of this function is not visible in this chunk.
            file_store_item = FileStoreItem(source=urlparse.urljoin(
# NOTE(review): fragment of a data-manager run loop; `jobs`, `gi`,
# `errored_dms`, `dbkey_name`, `dm_tool`, `dbkey`, `start`, `istart`,
# `dbkeys_list_file`, and the matching `try:` are not visible here.
done_count = 0
# Poll until every job reports a terminal state ('ok' or 'error');
# `done_count` is rebuilt from scratch on each pass.
while done_count < len(jobs):
    done_count = 0
    for job in jobs:
        job_id = job.get('id')
        job_state = gi.jobs.show_job(job_id).get('state', '')
        if job_state == 'ok':
            done_count += 1
        elif job_state == 'error':
            # Errored jobs also count toward done so the loop terminates.
            done_count += 1
            # NOTE(review): this append runs on EVERY poll pass while the
            # job stays in 'error', so `errored_dms` accumulates duplicate
            # entries — presumably should record each failure once; confirm.
            errored_dms.append({'dbkey': dbkey_name, 'DM': dm_tool})
    log.debug("", extra={'same_line': True})
    time.sleep(10)
log.debug("\tDbkey '{0}' installed successfully in '{1}'".format(
    dbkey.get('dbkey'), dt.datetime.now() - start))
# Python 2 except syntax below; the matching `try:` starts above this chunk.
except ConnectionError, e:
    response = None
    end = dt.datetime.now()
    log.error("\t* Error installing dbkey {0} for DM {1} (after {2}): {3}"
              .format(dbkey_name, dm_tool, end - start, e.body))
    errored_dms.append({'dbkey': dbkey_name, 'DM': dm_tool})
log.info("All dbkeys & DMs listed in '{0}' have been processed.".format(dbkeys_list_file))
log.info("Errored DMs: {0}".format(errored_dms))
log.info("Total run time: {0}".format(dt.datetime.now() - istart))
# NOTE(review): fragment — `workflow_engines`, `workflow_dict`, and the
# enclosing function are not visible in this chunk.
logger.debug("%s workflow engines found.", workflow_engines.count())
for workflow_engine in workflow_engines:
    # Set keys of `workflow_data` to WorkflowEngine UUIDs to denote
    # where workflows came from.
    workflow_dict[workflow_engine.uuid] = []
    logger.debug(
        "Fetching workflows from workflow engine %s",
        workflow_engine.name
    )
    galaxy_connection = workflow_engine.instance.galaxy_connection()
    try:
        workflows = galaxy_connection.workflows.get_workflows()
    except ConnectionError as e:
        # An unreachable engine aborts the whole fetch with RuntimeError.
        raise RuntimeError(
            "Unable to retrieve workflows from '{}' {}".format(
                workflow_engine.instance.base_url, e
            )
        )
    else:
        # Success path: fetch per-workflow detail plus its exported graph.
        for workflow in workflows:
            workflow_data = galaxy_connection.workflows.show_workflow(
                workflow["id"]
            )
            workflow_data["graph"] = (
                galaxy_connection.workflows.export_workflow_dict(
                    workflow["id"]
                )
            )
def prepare_galaxy(self):
    """Prepare for analysis execution in Galaxy"""
    error_msg = "Preparing Galaxy analysis failed: "
    connection = self.galaxy_connection()
    # creates new library in galaxy
    library_name = "{} Analysis - {} ({})".format(
        Site.objects.get_current().name, self.uuid,
        get_aware_local_time())
    try:
        library = connection.libraries.create_library(library_name)
    except galaxy.client.ConnectionError as exc:
        # Library creation failure is fatal: log and re-raise to the caller.
        logger.error(error_msg +
                     "can not create Galaxy library for analysis '%s': %s",
                     self.name, exc.message)
        raise
    # generates same ret_list purely based on analysis object
    ret_list = self.get_config()
    try:
        workflow_dict = connection.workflows.export_workflow_json(
            self.workflow.internal_id)
    except galaxy.client.ConnectionError as exc:
        # Workflow download failure is also fatal: log and re-raise.
        logger.error(error_msg +
                     "can not download Galaxy workflow for analysis '%s': "
                     "%s", self.name, exc.message)
        raise
    # NOTE(review): method appears truncated here — `library`, `ret_list`
    # and `workflow_dict` are unused in the visible portion of this chunk.
def get_workflows(workflow_engine):
    """Retrieve workflows from Galaxy and import into Refinery"""
    workflow_list = []
    issues = []
    connection = workflow_engine.instance.galaxy_connection()
    try:
        workflow_list = connection.workflows.get_workflows()
    except galaxy.client.ConnectionError as exc:
        # Best-effort: an unreachable engine is logged and skipped, not fatal.
        logger.error("Unable to retrieve workflows from '%s' - skipping - %s",
                     workflow_engine.instance.base_url, exc)
    else:
        # deactivate existing workflows for this workflow engine: deleting
        # workflows would lead to loss of the provenance information
        Workflow.objects.filter(
            workflow_engine=workflow_engine).update(is_active=False)
        for workflow_entry in workflow_list:
            workflow_object = GalaxyWorkflow(workflow_entry['name'],
                                             workflow_entry['id'])
            workflow_inputs = connection.workflows.show_workflow(
                workflow_object.identifier)['inputs']
            for input_identifier, input_description in workflow_inputs.items():
                workflow_input = GalaxyWorkflowInput(input_description['label'],
                                                     input_identifier)
                # NOTE(review): function truncated here — `issues`,
                # `workflow_input`, and the rest of the import logic are not
                # visible in this chunk.
# NOTE(review): fragment of a tool-installation loop; the enclosing function,
# the `for` loop that `break` exits, and names such as `counter`,
# `total_num_tools`, `tsc`, `default_err_msg`, `installed_tools`, and
# `istart` are not visible in this chunk. Truncated mid-call at the end.
    already_installed = True
    break
if not already_installed:
    # Initate tool installation
    start = dt.datetime.now()
    log.debug('(%s/%s) Installing tool %s from %s to section "%s" at '
              'revision %s (TRT: %s)' %
              (counter, total_num_tools, tool['name'], tool['owner'],
               tool['tool_panel_section_id'] or tool['tool_panel_section_label'],
               tool['revision'], dt.datetime.now() - istart))
    try:
        response = install_repository_revision(tool, tsc)
        end = dt.datetime.now()
        log_tool_install_success(tool=tool, start=start, end=end,
                                 installed_tools=installed_tools)
    # Python 2 except syntax — this snippet's upstream is Python 2.
    except ConnectionError, e:
        response = None
        end = dt.datetime.now()
        if default_err_msg in e.body:
            # "Already installed" is reported via ConnectionError; not fatal.
            log.debug("\tTool %s already installed (at revision %s)" %
                      (tool['name'], tool['revision']))
        else:
            if e.message == "Unexpected response from galaxy: 504":
                # Gateway timeout: the install may still be in progress, so
                # poll for up to an hour before declaring failure.
                log.debug("Timeout during install of %s, extending wait to 1h"
                          % ((tool['name'])))
                success = wait_for_install(tool=tool, tsc=tsc, timeout=3600)
                if success:
                    log_tool_install_success(tool=tool, start=start, end=end,
                                             installed_tools=installed_tools)
                    response = e.body  # TODO: find a better response message
                else:
                    # NOTE(review): snippet truncated mid-call below.
                    log_tool_install_error(tool=tool, start=start, end=end,
def install_done(tool, tool_shed_client):
    """Return True when *tool* is no longer mid-installation.

    A tool counts as done when every repository matching its name and
    owner reports a terminal status ('Installed' or 'Error').  If the
    repository list cannot be fetched, the tool is treated as not done.
    """
    try:
        repositories = tool_shed_client.get_repositories()
    except ConnectionError as err:
        log.warning('Failed to get repositories list: %s', str(err))
        return False
    # Repositories for this tool that are still in a non-terminal state.
    pending = [
        repo for repo in repositories
        if repo['name'] == tool['name']
        and repo['owner'] == tool['owner']
        and repo['status'] not in ['Installed', 'Error']
    ]
    return not pending
# NOTE(review): duplicate of the data-manager poll fragment earlier in this
# file; `jobs`, `gi`, `errored_dms`, `dbkey_name`, `dm_tool`, `dbkey`,
# `start`, `istart`, `dbkeys_list_file`, and the matching `try:` are not
# visible here.
done_count = 0
# Poll until every job reports a terminal state ('ok' or 'error');
# `done_count` is rebuilt from scratch on each pass.
while done_count < len(jobs):
    done_count = 0
    for job in jobs:
        job_id = job.get('id')
        job_state = gi.jobs.show_job(job_id).get('state', '')
        if job_state == 'ok':
            done_count += 1
        elif job_state == 'error':
            # Errored jobs also count toward done so the loop terminates.
            done_count += 1
            # NOTE(review): appended on EVERY poll pass while the job stays
            # in 'error' — `errored_dms` accumulates duplicates; confirm.
            errored_dms.append({'dbkey': dbkey_name, 'DM': dm_tool})
    log.debug("", extra={'same_line': True})
    time.sleep(10)
log.debug("\tDbkey '{0}' installed successfully in '{1}'".format(
    dbkey.get('dbkey'), dt.datetime.now() - start))
# Python 2 except syntax below; the matching `try:` starts above this chunk.
except ConnectionError, e:
    response = None
    end = dt.datetime.now()
    log.error("\t* Error installing dbkey {0} for DM {1} (after {2}): {3}"
              .format(dbkey_name, dm_tool, end - start, e.body))
    errored_dms.append({'dbkey': dbkey_name, 'DM': dm_tool})
log.info("All dbkeys & DMs listed in '{0}' have been processed.".format(dbkeys_list_file))
log.info("Errored DMs: {0}".format(errored_dms))
log.info("Total run time: {0}".format(dt.datetime.now() - istart))
# NOTE(review): fragment of a repository-wait method; `name`, `owner`,
# `installing_repo_id`, `timeout`, `NON_TERMINAL_REPOSITORY_STATES`,
# `unicodify`, and the enclosing def are not visible in this chunk.
msg = "Multiple repositories for name '%s', owner '%s' found in non-terminal states. Please uninstall all non-terminal repositories."
raise AssertionError(msg % (name, owner))
start = dt.datetime.now()
# Poll the Tool Shed until the repository reaches a terminal state
# ('Installed' or 'Error') or the timeout elapses.
while (dt.datetime.now() - start) < dt.timedelta(seconds=timeout):
    try:
        installed_repo = self.tool_shed_client.show_repository(installing_repo_id)
        status = installed_repo['status']
        if status == 'Installed':
            return True
        elif status == 'Error':
            return False
        elif status in NON_TERMINAL_REPOSITORY_STATES:
            # Still installing — wait and poll again.
            time.sleep(10)
        else:
            raise AssertionError("Repository name '%s', owner '%s' in unknown status '%s'" % (name, owner, status))
    except ConnectionError as e:
        # Transient connection failures are retried after a short pause.
        if log:
            log.warning('Failed to get repositories list: %s', unicodify(e))
        time.sleep(10)
# Timed out without reaching a terminal 'Installed' state.
return False