def test_custom_meta_is_persisted(self):
    """Additional meta data on jobs is stored and persisted correctly."""
    job = Job.create(func=fixtures.say_hello, args=('Lionel',))
    job.meta['foo'] = 'bar'
    job.save()

    raw_data = self.testconn.hget(job.key, 'meta')
    self.assertEqual(loads(raw_data)['foo'], 'bar')

    job2 = Job.fetch(job.id)
    self.assertEqual(job2.meta['foo'], 'bar')

self.assertEqual(q.count, 1)

# keep for later
enqueued_at_date = str(job.enqueued_at)

w = Worker([q])
w.work(burst=True)  # should silently pass

# Postconditions
self.assertEqual(q.count, 0)
failed_job_registry = FailedJobRegistry(queue=q)
self.assertTrue(job in failed_job_registry)
self.assertEqual(w.get_current_job_id(), None)

# Check the job
job = Job.fetch(job.id)
self.assertEqual(job.origin, q.name)

# Should be the original enqueued_at date, not the date of enqueueing
# to the failed queue
self.assertEqual(str(job.enqueued_at), enqueued_at_date)
self.assertTrue(job.exc_info)  # should contain exc_info
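
The tests above exercise Job.fetch by round-tripping custom job.meta through Redis. A minimal standalone sketch of the same pattern, assuming a local Redis instance and rq installed (the queue name 'default' and the enqueued function are illustrative, not taken from the tests):

from redis import Redis
from rq import Queue
from rq.job import Job

redis_conn = Redis()                        # assumes Redis on localhost:6379
q = Queue('default', connection=redis_conn)

# enqueue an illustrative job and attach custom meta data to it
job = q.enqueue(print, 'hello')
job.meta['requested_by'] = 'alice'
job.save_meta()

# later, re-fetch the job by id and read the meta back
fetched = Job.fetch(job.id, connection=redis_conn)
assert fetched.meta['requested_by'] == 'alice'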

def rq_job(self):
    """The last RQ Job this ran on."""
    if not self.rq_id or not self.rq_origin:
        return
    try:
        return RQJob.fetch(self.rq_id, connection=get_connection(self.rq_origin))
    except NoSuchJobError:
        return

registry = DeferredJobRegistry(queue.name, queue.connection)

items_per_page = 100
num_jobs = len(registry)
page = int(request.GET.get('page', 1))
jobs = []

if num_jobs > 0:
    last_page = int(ceil(num_jobs / items_per_page))
    page_range = range(1, last_page + 1)
    offset = items_per_page * (page - 1)
    job_ids = registry.get_job_ids(offset, offset + items_per_page - 1)

    for job_id in job_ids:
        try:
            jobs.append(Job.fetch(job_id, connection=queue.connection))
        except NoSuchJobError:
            pass
else:
    page_range = []

context_data = {
    'queue': queue,
    'queue_index': queue_index,
    'jobs': jobs,
    'num_jobs': num_jobs,
    'page': page,
    'page_range': page_range,
    'job_status': 'Deferred',
}
return render(request, 'django_rq/jobs.html', context_data)
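
One detail worth calling out in the view above: registry.get_job_ids takes start and end indexes that are both treated as inclusive (they are passed to Redis' ZRANGE), which is why the end bound is offset + items_per_page - 1. A minimal sketch of the same slicing on its own, assuming a queue named 'default' and a page number chosen for illustration:

from redis import Redis
from rq import Queue
from rq.registry import DeferredJobRegistry

redis_conn = Redis()                        # assumes Redis on localhost:6379
queue = Queue('default', connection=redis_conn)
registry = DeferredJobRegistry(queue.name, queue.connection)

items_per_page = 100
page = 2                                    # illustrative page number

# both bounds are inclusive, so this returns at most items_per_page ids
offset = items_per_page * (page - 1)
job_ids = registry.get_job_ids(offset, offset + items_per_page - 1)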

# turn into a list, just in case
jobs = list(jobs)

# cancel enqueued or scheduled jobs
for job in jobs:
    # Try to remove (cancel) a scheduled job in RQ-Scheduler. Behind
    # the curtains, it accesses Redis' `zrem`, which ignores
    # non-existing members of a set.
    if scheduler.connection.zscore(scheduler.scheduled_jobs_key, job):
        scheduler.cancel(job)
        logger.debug("%s: scheduled job [%r] deleted", action_name, job)

    try:
        # fetch the job from Redis - but only if it's already enqueued
        enqueued_job = Job.fetch(job, connection=connection)
        # we don't need to check whether the job finished or failed; we
        # can blindly delete it
        enqueued_job.delete(remove_from_queue=True)
        logger.debug("%s: enqueued job [%r] deleted", action_name, job)
    except NoSuchJobError:
        pass

    # add a message about removing the job
    if request:
        messages.info(
            request,
            format_html(
                "Scheduled email {} was removed because action "
                "conditions have changed. "
                '<a href="{}">See other scheduled jobs</a>.',
"""Get all details for a run
Arguments:
request {[Django request]} -- The request object
Keyword Arguments:
pk {string} -- Id of the run
format {string} -- Output format to use (default: {None})
Returns:
Json -- Object containing all metrics for the pod
"""
run = ModelRun.objects.get(pk=pk)
redis_conn = django_rq.get_connection()
job = Job.fetch(run.job_id, redis_conn)
run.job_metadata = job.meta
serializer = ModelRunSerializer(run, many=False)
return Response(serializer.data, status=status.HTTP_200_OK)
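
Unlike the other snippets on this page, the view above does not guard the fetch, so a missing or expired job raises NoSuchJobError. A hedged variant of the same lines (the empty-dict fallback is an assumption for illustration, not part of the original project):

from rq.exceptions import NoSuchJobError
from rq.job import Job

try:
    job = Job.fetch(run.job_id, redis_conn)
    run.job_metadata = job.meta
except NoSuchJobError:
    # assumption: treat a missing or expired job as "no metadata"
    run.job_metadata = {}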

def cancel(self, request, object_id):
    """Fetch the job and cancel its execution."""
    rqjob = get_object_or_404(RQJob, id=object_id)

    link = reverse('admin:autoemails_rqjob_preview', args=[object_id])

    # fetch job
    try:
        job = Job.fetch(rqjob.job_id, connection=scheduler.connection)
    except NoSuchJobError:
        messages.warning(request, 'The corresponding job in Redis was '
                                  'probably already executed.')
        return redirect(link)

    if job.is_queued or not job.get_status():
        job.cancel()  # for "pure" jobs
        scheduler.cancel(job)  # for scheduler-based jobs
        messages.info(request,
                      f'The job {rqjob.job_id} was cancelled.')

    elif job.is_started:
        # Right now we don't know how to test a started job, so we simply
        # don't allow such jobs to be cancelled.
        messages.warning(request,

def get_job(self):
    job_id = self.kwargs.get('job_id')
    if not job_id:
        job_id = self.get_job_id_from_address()
    try:
        return rq.job.Job.fetch(job_id, django_rq.get_connection())
    except rq.exceptions.NoSuchJobError:
        return
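
When several job ids need resolving at once, looping over Job.fetch (as the registry view above does) can be replaced by Job.fetch_many, which returns None in place of missing jobs instead of raising. A minimal sketch, assuming a recent rq release that provides fetch_many and an illustrative list of ids:

from redis import Redis
from rq.job import Job

redis_conn = Redis()                        # assumes Redis on localhost:6379
job_ids = ['id-1', 'id-2', 'id-3']          # illustrative ids

jobs = Job.fetch_many(job_ids, connection=redis_conn)
resolved = [job for job in jobs if job is not None]  # drop ids with no job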