author     Joris <j.h.hoendervangers@gmail.com>   2023-04-01 14:43:47 +0200
committer  GitHub <noreply@github.com>            2023-04-01 19:43:47 +0700
commit     a9fae76e883eb1bba8e8f62406669371e12d1f4b
tree       76d7f55230c4b6228e86a0cb9ebe20e581d567fa
parent     04722339d7598ff0c52f11c3680ed2dd922e6768
Fix error in example in the documentation (#1870)
The solution that @rpkak proposes works. Closes #1524
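For context: the second positional parameter of `Queue.prepare_data()` is `args`, the tuple of positional arguments for the job function, so a single URL has to be wrapped in a one-element tuple. A minimal sketch of the point being fixed (not part of this commit; `count_words_at_url` is a stand-in for the task used in the RQ docs example):

```python
from rq import Queue

def count_words_at_url(url):
    # Stand-in for the task used in the RQ docs example.
    import requests
    return len(requests.get(url).text.split())

# Wrong (the docs bug being fixed): the bare string is taken as `args`,
# and job args must be a tuple or a list.
# Queue.prepare_data(count_words_at_url, 'http://nvie.com', job_id='my_job_id')

# Right: wrap the single positional argument in a one-element tuple.
data = Queue.prepare_data(count_words_at_url, ('http://nvie.com',), job_id='my_job_id')
```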
 docs/docs/index.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/docs/docs/index.md b/docs/docs/index.md
index 1c24c6b..e42539f 100644
--- a/docs/docs/index.md
+++ b/docs/docs/index.md
@@ -111,8 +111,8 @@ You can also enqueue multiple jobs in bulk with `queue.enqueue_many()` and `Queu
```python
jobs = q.enqueue_many(
[
- Queue.prepare_data(count_words_at_url, 'http://nvie.com', job_id='my_job_id'),
- Queue.prepare_data(count_words_at_url, 'http://nvie.com', job_id='my_other_job_id'),
+ Queue.prepare_data(count_words_at_url, ('http://nvie.com',), job_id='my_job_id'),
+ Queue.prepare_data(count_words_at_url, ('http://nvie.com',), job_id='my_other_job_id'),
]
)
```
@@ -123,8 +123,8 @@ which will enqueue all the jobs in a single redis `pipeline` which you can optio
with q.connection.pipeline() as pipe:
jobs = q.enqueue_many(
[
- Queue.prepare_data(count_words_at_url, 'http://nvie.com', job_id='my_job_id'),
- Queue.prepare_data(count_words_at_url, 'http://nvie.com', job_id='my_other_job_id'),
+ Queue.prepare_data(count_words_at_url, ('http://nvie.com',), job_id='my_job_id'),
+ Queue.prepare_data(count_words_at_url, ('http://nvie.com',), job_id='my_other_job_id'),
],
pipeline=pipe
)
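
For completeness, a self-contained sketch of the corrected pipelined form, assuming a Redis server on localhost and a `count_words_at_url` task importable by the worker from a hypothetical `tasks` module. With an explicit pipeline the jobs only reach Redis when the pipeline is executed, so the sketch calls `pipe.execute()` itself:

```python
from redis import Redis
from rq import Queue

from tasks import count_words_at_url  # hypothetical module providing the task

q = Queue(connection=Redis())

with q.connection.pipeline() as pipe:
    jobs = q.enqueue_many(
        [
            Queue.prepare_data(count_words_at_url, ('http://nvie.com',), job_id='my_job_id'),
            Queue.prepare_data(count_words_at_url, ('http://nvie.com',), job_id='my_other_job_id'),
        ],
        pipeline=pipe,
    )
    pipe.execute()  # the enqueue commands are only sent to Redis here
```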