|
1 | 1 | from __future__ import absolute_import |
2 | 2 |
|
| 3 | +from itertools import cycle |
3 | 4 | import logging |
4 | 5 | import random |
5 | 6 | import six |
6 | 7 |
|
7 | | -from itertools import cycle |
8 | | - |
9 | 8 | from six.moves import xrange |
10 | 9 |
|
11 | | -from .base import ( |
12 | | - Producer, BATCH_SEND_DEFAULT_INTERVAL, |
13 | | - BATCH_SEND_MSG_COUNT, ASYNC_QUEUE_MAXSIZE, ASYNC_QUEUE_PUT_TIMEOUT, |
14 | | - ASYNC_RETRY_LIMIT, ASYNC_RETRY_BACKOFF_MS, ASYNC_RETRY_ON_TIMEOUTS |
15 | | -) |
| 10 | +from .base import Producer |
| 11 | + |
16 | 12 |
|
17 | 13 | log = logging.getLogger(__name__) |
18 | 14 |
|
19 | 15 |
|
class SimpleProducer(Producer):
    """A simple, round-robin producer.

    Each message goes to exactly one partition, cycling through the
    topic's partitions in order.

    See Producer class for Base Arguments

    Additional Arguments:
        random_start (bool, optional): randomize the initial partition which
            the first message block will be published to, otherwise
            if false, the first message block will always publish
            to partition 0 before cycling through each partition,
            defaults to True.
    """
    def __init__(self, *args, **kwargs):
        """Record round-robin state, then hand everything else to Producer."""
        # Consume our own keyword first so Producer never sees it in kwargs.
        random_start = kwargs.pop('random_start', True)
        # Per-topic partition iterators, built lazily by _next_partition.
        self.partition_cycles = {}
        self.random_start = random_start
        super(SimpleProducer, self).__init__(*args, **kwargs)
66 | 32 |
|
67 | 33 | def _next_partition(self, topic): |
68 | 34 | if topic not in self.partition_cycles: |
|
0 commit comments