Top 10 Examples of "rq in functional component" in Python

Dive into secure and efficient coding practices with our curated list of the top 10 examples showcasing RQ (Redis Queue) in Python. Our advanced machine learning engine meticulously scans each line of code, cross-referencing millions of open source libraries to ensure your implementation is not just functional, but also robust and secure. Master background jobs, workers, registries, scheduling, and retries with confidence and precision.
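All ten examples below build on the same core RQ workflow: enqueue a Python callable on a Redis-backed queue, and let a worker process execute it in the background. Here is a minimal sketch of that workflow, assuming a Redis server on localhost; it is illustrative only and not taken from the examples.

from redis import Redis
from rq import Queue, Worker

def add(x, y):
    # In real projects the task should live in an importable module,
    # so that worker processes can resolve it by dotted path.
    return x + y

queue = Queue(connection=Redis())            # 'default' queue on a local Redis
job = queue.enqueue(add, 2, 3)               # returns immediately with a Job handle

# Drain whatever is currently on the queue, then exit (burst mode).
Worker([queue], connection=queue.connection).work(burst=True)

print(job.get_status())                      # reports 'finished' once the worker has run it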

# Example 1: sentry-sdk's RQ integration test. A job that raises
# ZeroDivisionError is enqueued on a fake Redis connection, processed by a
# burst worker, and the resulting error event is read back from Sentry's
# transport. sentry_init, capture_events_forksafe and crashing_job are
# fixtures/helpers from the sentry-sdk test suite.
import rq
from fakeredis import FakeStrictRedis
from sentry_sdk.integrations.rq import RqIntegration


def test_transport_shutdown(sentry_init, capture_events_forksafe):
    sentry_init(integrations=[RqIntegration()])

    events = capture_events_forksafe()

    queue = rq.Queue(connection=FakeStrictRedis())
    worker = rq.Worker([queue], connection=queue.connection)

    queue.enqueue(crashing_job, foo=42)
    worker.work(burst=True)

    event = events.read_event()
    events.read_flush()

    (exception,) = event["exception"]["values"]
    assert exception["type"] == "ZeroDivisionError"
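
The test above exercises sentry-sdk's RQ integration end to end with a fake Redis and a deliberately crashing job. In application code the integration is enabled with a single init call in the process that runs your worker; a minimal sketch (the DSN value is a placeholder):

import sentry_sdk
from sentry_sdk.integrations.rq import RqIntegration

# Run this in the process that starts your RQ worker; unhandled exceptions
# raised by jobs are then reported to Sentry automatically.
sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[RqIntegration()],
)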

# Example 2: module header and registry tests excerpted from RQ's own test
# suite. StartedJobRegistry can be built from a Queue or from a name plus a
# Redis connection; RQTestCase provides the self.testconn Redis fixture.
from rq.compat import as_text
from rq.defaults import DEFAULT_FAILURE_TTL
from rq.exceptions import InvalidJobOperation
from rq.job import Job, JobStatus, requeue_job
from rq.queue import Queue
from rq.utils import current_timestamp
from rq.worker import Worker
from rq.registry import (clean_registries, DeferredJobRegistry,
                         FailedJobRegistry, FinishedJobRegistry,
                         StartedJobRegistry)

from tests import RQTestCase
from tests.fixtures import div_by_zero, say_hello


class CustomJob(Job):
    """A custom job class just to test it"""


class TestRegistry(RQTestCase):

    def setUp(self):
        super(TestRegistry, self).setUp()
        self.registry = StartedJobRegistry(connection=self.testconn)

    def test_init(self):
        """Registry can be instantiated with queue or name/Redis connection"""
        queue = Queue('foo', connection=self.testconn)
        registry = StartedJobRegistry(queue=queue)
        self.assertEqual(registry.name, queue.name)
        self.assertEqual(registry.connection, queue.connection)
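
Beyond the tests, these registries are useful for introspection and recovery in production: you can list the job ids a registry currently tracks and requeue failed jobs. A minimal sketch, assuming a running Redis server:

from redis import Redis
from rq import Queue
from rq.registry import FailedJobRegistry, FinishedJobRegistry, StartedJobRegistry

queue = Queue('default', connection=Redis())

print(StartedJobRegistry(queue=queue).get_job_ids())    # jobs currently executing
print(FinishedJobRegistry(queue=queue).get_job_ids())   # successfully finished jobs

failed = FailedJobRegistry(queue=queue)
for job_id in failed.get_job_ids():
    failed.requeue(job_id)                              # push failed jobs back onto the queue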

# Example 3: Queue.all() can return queue objects bound to a custom Job class.
# Test method from RQ's queue tests; it relies on the RQTestCase setup and the
# say_hello fixture imported above.
def test_all_custom_job(self):
    class CustomJob(Job):
        pass

    q = Queue('all-queue')
    q.enqueue(say_hello)
    queues = Queue.all(job_class=CustomJob)
    self.assertEqual(len(queues), 1)
    self.assertIs(queues[0].job_class, CustomJob)

# Example 4: RQScheduler moves jobs whose scheduled time has passed from the
# ScheduledJobRegistry onto the queue. Excerpt from RQ's scheduler tests.
from datetime import datetime, timezone

from rq.registry import ScheduledJobRegistry
from rq.scheduler import RQScheduler

utc = timezone.utc  # the original test module refers to a UTC tzinfo as `utc`

def test_enqueue_scheduled_jobs(self):
    """Scheduler can enqueue scheduled jobs"""
    queue = Queue(connection=self.testconn)
    registry = ScheduledJobRegistry(queue=queue)
    job = Job.create('myfunc', connection=self.testconn)
    job.save()
    registry.schedule(job, datetime(2019, 1, 1, tzinfo=utc))
    scheduler = RQScheduler([queue], connection=self.testconn)
    scheduler.acquire_locks()
    scheduler.enqueue_scheduled_jobs()
    self.assertEqual(len(queue), 1)

    # After the job is enqueued, the registry should be empty
    self.assertEqual(len(registry), 0)

    # Jobs scheduled in the far future should not be affected
    registry.schedule(job, datetime(2100, 1, 1, tzinfo=utc))
    scheduler.enqueue_scheduled_jobs()
    self.assertEqual(len(queue), 1)
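
Application code rarely drives RQScheduler directly the way this test does; the same machinery is reached through Queue.enqueue_at and Queue.enqueue_in (RQ 1.2+). A minimal sketch, assuming a running Redis server and a hypothetical myapp.tasks.send_report task:

from datetime import datetime, timedelta, timezone

from redis import Redis
from rq import Queue

from myapp.tasks import send_report  # hypothetical task module

queue = Queue(connection=Redis())

# Run at a specific point in time (UTC)...
queue.enqueue_at(datetime(2030, 1, 1, 9, 0, tzinfo=timezone.utc), send_report)

# ...or after a relative delay.
queue.enqueue_in(timedelta(minutes=10), send_report)

# A worker started with the scheduler enabled (rq worker --with-scheduler)
# picks these jobs up when they come due.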

# Example 5: enqueuing with depends_on fails with NoSuchJobError when the
# dependency was never saved to Redis. Excerpt from RQ's queue tests.
from rq.exceptions import NoSuchJobError

def test_enqueue_job_with_invalid_dependency(self):
    """Enqueuing a job fails if the dependency does not exist at all."""
    parent_job = Job.create(func=say_hello)
    # without save() the job is not visible to others

    q = Queue()
    with self.assertRaises(NoSuchJobError):
        q.enqueue_call(say_hello, depends_on=parent_job)

    with self.assertRaises(NoSuchJobError):
        q.enqueue_call(say_hello, depends_on=parent_job.id)

    self.assertEqual(q.job_ids, [])
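
For the successful case, the dependency only needs to exist in Redis before the dependent job is enqueued; enqueue() persists the job for you and accepts the same depends_on argument. A minimal sketch with hypothetical fetch/summarize tasks:

from redis import Redis
from rq import Queue

def fetch(url):          # hypothetical task
    ...

def summarize(url):      # hypothetical task that needs fetch() to finish first
    ...

queue = Queue(connection=Redis())
parent = queue.enqueue(fetch, 'https://example.com')        # saved to Redis by enqueue()
child = queue.enqueue(summarize, 'https://example.com',
                      depends_on=parent)                     # runs only after parent succeeds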

# Example 6: a failed job is retried once and then moved to a dead letter
# queue. RetryWorker and get_failed_queue come from an external retry add-on
# and RQ's pre-1.0 failed-queue API; error_fun is a test helper that raises.
def test_failed_job_max_tries_2__retry_once_then_move_to_dlq():
    q = Queue()
    q2 = Queue('not_used')
    failed_q = get_failed_queue()
    dlq = Queue('dead_letter_queue')

    w = Worker([q])

    # Here the RetryWorker is not listening on an active queue: it will not
    # run any jobs, just look to requeue failed jobs.
    rw = RetryWorker([q2], retry_config=dict(
        max_tries=2, maint_interval=0, delays=[]))

    # Run a job that will fail
    job = q.enqueue(error_fun)
    w.work(burst=True)
    assert q.count == 0
    assert get_failed_queue().count == 1

    # Run the retry worker
    rw.work(burst=True)
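
RetryWorker and get_failed_queue above belong to older setups. Recent RQ releases (1.5+) ship a built-in Retry helper that covers the retry-then-give-up pattern without a dedicated retry worker; a minimal sketch, with myapp.tasks.unreliable_task as a hypothetical function:

from redis import Redis
from rq import Queue, Retry

from myapp.tasks import unreliable_task  # hypothetical task module

queue = Queue(connection=Redis())

# Retry up to 3 times, waiting 10s, 30s, then 60s between attempts.
# Delayed retries are re-enqueued by a worker running with its scheduler
# enabled (rq worker --with-scheduler).
queue.enqueue(unreliable_task, retry=Retry(max=3, interval=[10, 30, 60]))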

# Example 7: Worker.find_by_key() restores a worker's queues, state and job id
# from Redis, and cleans up keys of workers that no longer exist. Excerpt from
# RQ's worker tests.
from rq.version import VERSION
from rq.worker import WorkerStatus

def test_find_by_key(self):
    """Worker.find_by_key restores queues, state and job_id."""
    queues = [Queue('foo'), Queue('bar')]
    w = Worker(queues)
    w.register_death()
    w.register_birth()
    w.set_state(WorkerStatus.STARTED)
    worker = Worker.find_by_key(w.key)
    self.assertEqual(worker.queues, queues)
    self.assertEqual(worker.get_state(), WorkerStatus.STARTED)
    self.assertEqual(worker._job_id, None)
    self.assertTrue(worker.key in Worker.all_keys(worker.connection))
    self.assertEqual(worker.version, VERSION)

    # If the worker is gone, its keys should also be removed
    worker.connection.delete(worker.key)
    Worker.find_by_key(worker.key)
    self.assertFalse(worker.key in Worker.all_keys(worker.connection))

    self.assertRaises(ValueError, Worker.find_by_key, 'foo')

# Example 8: Worker() accepts queue names (str or bytes), Queue objects,
# iterables of either, or a single Queue. Excerpt from the body of an RQ
# worker-creation test (hence the self.assertEqual calls).
# With iterable of byte strings
w = Worker(iter([b'foo', b'bar']))
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')

# With single Queue
w = Worker(Queue('foo'))
self.assertEqual(w.queues[0].name, 'foo')

# With iterable of Queues
w = Worker(iter([Queue('foo'), Queue('bar')]))
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')

# With list of Queues
w = Worker([Queue('foo'), Queue('bar')])
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')

# Example 9: warm shutdown. A worker that receives a single SIGTERM while a
# job is running lets the job finish before exiting. create_file_after_timeout
# and kill_worker are fixtures from RQ's test suite.
import os
from multiprocessing import Process

def test_working_worker_warm_shutdown(self):
    """Worker with an ongoing job receives a single SIGTERM, allows the job to finish, then shuts down."""
    fooq = Queue('foo')
    w = Worker(fooq)

    sentinel_file = '/tmp/.rq_sentinel_warm'
    fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
    self.assertFalse(w._stop_requested)
    p = Process(target=kill_worker, args=(os.getpid(), False))
    p.start()

    w.work()

    p.join(2)
    self.assertFalse(p.is_alive())
    self.assertTrue(w._stop_requested)
    self.assertTrue(os.path.exists(sentinel_file))

    self.assertIsNotNone(w.shutdown_requested_date)
    self.assertEqual(type(w.shutdown_requested_date).__name__, 'datetime')

# Example 10: metadata written by the job itself survives the whole
# queue/worker/job round trip, and pre-existing meta keys are preserved.
# modify_self is a fixture from RQ's test suite.
def test_self_modification_persistence(self):
    """Make sure that any meta modification done by
    the job itself persists completely through the
    queue/worker/job stack."""
    q = Queue()
    # Also make sure that previously existing metadata
    # persists properly
    job = q.enqueue(modify_self, meta={'foo': 'bar', 'baz': 42},
                    args=[{'baz': 10, 'newinfo': 'waka'}])

    w = Worker([q])
    w.work(burst=True)

    job_check = Job.fetch(job.id)
    self.assertEqual(job_check.meta['foo'], 'bar')
    self.assertEqual(job_check.meta['baz'], 10)
    self.assertEqual(job_check.meta['newinfo'], 'waka')
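
The modify_self fixture itself is not shown on this page. Inside a running job, RQ exposes the current Job via rq.get_current_job(), so a plausible implementation of such a self-modifying task might look like this (an assumption for illustration, not RQ's actual fixture):

from rq import get_current_job

def modify_self(extra):
    """Hypothetical task: merge `extra` into the job's own metadata."""
    job = get_current_job()
    job.meta.update(extra)
    job.save_meta()   # persist job.meta back to Redis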
