"""
# Setup
self.repo_manager.create_repo('creeper')
for i in range(1, 6):
add_result('creeper', i)
# Test
entries = self.sync_manager.sync_history('creeper')
# Verify
self.assertEqual(5, len(entries))
# Verify descending order
for i in range(0, 4):
first = dateutils.parse_iso8601_datetime(entries[i]['completed'])
second = dateutils.parse_iso8601_datetime(entries[i + 1]['completed'])
self.assertTrue(first > second)
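
# Stand-alone sketch (not Pulp code): the same descending-order check on ISO 8601
# 'completed' timestamps, using only the standard library and hypothetical sample data.
from datetime import datetime, timedelta, timezone

base = datetime(2024, 1, 1, tzinfo=timezone.utc)
entries = [{'completed': (base - timedelta(hours=i)).isoformat()} for i in range(5)]
parsed = [datetime.fromisoformat(e['completed']) for e in entries]
assert all(parsed[i] > parsed[i + 1] for i in range(len(parsed) - 1))
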
def associate_single_unit(repository, unit):
    """
    Associate a single unit to a repository.

    :param repository: The repository to update.
    :type repository: pulp.server.db.model.Repository
    :param unit: The unit to associate to the repository.
    :type unit: pulp.server.db.model.ContentUnit
    """
    current_timestamp = dateutils.now_utc_timestamp()
    formatted_datetime = dateutils.format_iso8601_utc_timestamp(current_timestamp)
    qs = model.RepositoryContentUnit.objects(
        repo_id=repository.repo_id,
        unit_id=unit.id,
        unit_type_id=unit._content_type_id)
    qs.update_one(
        set_on_insert__created=formatted_datetime,
        set__updated=formatted_datetime,
        upsert=True)
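
# Stand-alone sketch (an assumption, not the pulp.common.dateutils implementation):
# roughly what the now_utc_timestamp() / format_iso8601_utc_timestamp() pair above
# produces, expressed with the standard library.
import time
from datetime import datetime, timezone

posix_seconds = time.time()                       # analogous to now_utc_timestamp()
iso_string = datetime.fromtimestamp(posix_seconds, tz=timezone.utc).isoformat()
print(iso_string)                                 # e.g. '2024-01-01T12:00:00+00:00'
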
:param args: Original arguments for the executed task.
:param kwargs: Original keyword arguments for the executed task.
:param einfo: celery's ExceptionInfo instance, containing serialized traceback.
"""
if isinstance(exc, PulpCodedException):
    _logger.info(_('Task failed : [%(task_id)s] : %(msg)s') %
                 {'task_id': task_id, 'msg': str(exc)})
    _logger.debug(traceback.format_exc())
else:
    _logger.info(_('Task failed : [%s]') % task_id)
    # celery will log the traceback
if kwargs.get('scheduled_call_id') is not None:
    utils.increment_failure_count(kwargs['scheduled_call_id'])
if not self.request.called_directly:
    now = datetime.now(dateutils.utc_tz())
    finish_time = dateutils.format_iso8601_datetime(now)
    task_status = TaskStatus.objects.get(task_id=task_id)
    task_status['state'] = constants.CALL_ERROR_STATE
    task_status['finish_time'] = finish_time
    task_status['traceback'] = einfo.traceback
    if not isinstance(exc, PulpException):
        exc = PulpException(str(exc))
    task_status['error'] = exc.to_dict()
    task_status.save()
self._handle_cProfile(task_id)
common_utils.delete_working_directory()
def _now_timestamp(string=True):
    """
    Return the current UTC time.

    :param string: if True (the default), return the time as an iso8601-formatted
                   string; otherwise return the datetime object itself.
    :type string: bool
    :return: iso8601 UTC timestamp with timezone specified, or a timezone-aware
             datetime when string is False.
    :rtype: str or datetime.datetime
    """
    now = dateutils.now_utc_datetime_with_tzinfo()
    if string:
        return dateutils.format_iso8601_datetime(now)
    else:
        return now
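
# Stand-alone sketch (assumption): the same "string or datetime" helper written
# against the standard library instead of pulp.common.dateutils.
from datetime import datetime, timezone

def _now_timestamp_stdlib(string=True):
    now = datetime.now(timezone.utc)              # timezone-aware UTC datetime
    return now.isoformat() if string else now
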
def schedule(self, previous_run):
    assert isinstance(previous_run, (types.NoneType, datetime.datetime))
    if previous_run is None:
        return (None, datetime.datetime.now(dateutils.utc_tz()))
    return (None, None)
def _cmp_history(h1, h2):
    t1 = dateutils.parse_iso8601_datetime(h1['timestamp'])
    t2 = dateutils.parse_iso8601_datetime(h2['timestamp'])
    return cmp(t1, t2)
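
# Stand-alone sketch: cmp() exists only on Python 2. On Python 3 the same ordering
# is usually expressed with a key function and sort() instead of a comparator.
from datetime import datetime

def _history_key(entry):
    return datetime.fromisoformat(entry['timestamp'])

history = [{'timestamp': '2024-01-02T00:00:00+00:00'},
           {'timestamp': '2024-01-01T00:00:00+00:00'}]
history.sort(key=_history_key)                    # oldest entry first
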
def __init__(self, consumer_id, originator, event_type, details):
    super(ConsumerHistoryEvent, self).__init__()
    self.consumer_id = consumer_id
    self.originator = originator
    self.type = event_type
    self.details = details
    now = datetime.datetime.now(dateutils.utc_tz())
    self.timestamp = dateutils.format_iso8601_datetime(now)
def __update(self, body):
    self.__lock()
    try:
        log.debug(body)
        uuid = body.pop('uuid')
        next = body.pop('next')
        last = dt.now(dateutils.utc_tz())
        next = int(next * 1.20)
        next = last + timedelta(seconds=next)
        self.__status[uuid] = (last, next, body)
    finally:
        self.__unlock()
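
# Stand-alone sketch of the expiry math above: the next heartbeat is expected
# within the reported interval plus a 20% grace period (the interval value here
# is hypothetical sample data).
from datetime import datetime, timedelta, timezone

reported_interval = 10                            # seconds, as popped from the body
last = datetime.now(timezone.utc)
next_expected = last + timedelta(seconds=int(reported_interval * 1.20))
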
for task state tracking of Pulp tasks.
"""
# Check task status and skip running the task if task state is 'canceled'.
try:
    task_status = TaskStatus.objects.get(task_id=self.request.id)
except DoesNotExist:
    task_status = None
if task_status and task_status['state'] == constants.CALL_CANCELED_STATE:
    _logger.debug("Task cancel received for task-id : [%s]" % self.request.id)
    return
# Update start_time and set the task state to 'running' for asynchronous tasks.
# Also update the worker_name to cover cases where apply_async was called without
# providing the worker name up-front. Skip updating status for eagerly executed tasks,
# since we don't want to track synchronous tasks in our database.
if not self.request.called_directly:
    now = datetime.now(dateutils.utc_tz())
    start_time = dateutils.format_iso8601_datetime(now)
    worker_name = self.request.hostname
    # Using 'upsert' to avoid a possible race condition described in the apply_async
    # method above.
    TaskStatus.objects(task_id=self.request.id).update_one(
        set__state=constants.CALL_RUNNING_STATE,
        set__start_time=start_time,
        set__worker_name=worker_name,
        upsert=True)
# Run the actual task
_logger.debug("Running task : [%s]" % self.request.id)
if config.getboolean('profiling', 'enabled') is True:
    self.pr = cProfile.Profile()
    self.pr.enable()
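
# Stand-alone sketch of the cProfile pattern above: enable a profiler around the
# work, then disable it and report the stats. (The Pulp code presumably does the
# disable/report step in the separate _handle_cProfile helper, which is not shown.)
import cProfile
import pstats

pr = cProfile.Profile()
pr.enable()
sum(i * i for i in range(100000))                 # stand-in for the real task body
pr.disable()
pstats.Stats(pr).sort_stats('cumulative').print_stats(5)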