m2m模型翻译
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

71 lines
3.0 KiB

6 months ago
  1. from __future__ import absolute_import
  2. import collections
  3. import threading
  4. from kafka import errors as Errors
  5. from kafka.future import Future
  6. class FutureProduceResult(Future):
  7. def __init__(self, topic_partition):
  8. super(FutureProduceResult, self).__init__()
  9. self.topic_partition = topic_partition
  10. self._latch = threading.Event()
  11. def success(self, value):
  12. ret = super(FutureProduceResult, self).success(value)
  13. self._latch.set()
  14. return ret
  15. def failure(self, error):
  16. ret = super(FutureProduceResult, self).failure(error)
  17. self._latch.set()
  18. return ret
  19. def wait(self, timeout=None):
  20. # wait() on python2.6 returns None instead of the flag value
  21. return self._latch.wait(timeout) or self._latch.is_set()
  22. class FutureRecordMetadata(Future):
  23. def __init__(self, produce_future, relative_offset, timestamp_ms, checksum, serialized_key_size, serialized_value_size, serialized_header_size):
  24. super(FutureRecordMetadata, self).__init__()
  25. self._produce_future = produce_future
  26. # packing args as a tuple is a minor speed optimization
  27. self.args = (relative_offset, timestamp_ms, checksum, serialized_key_size, serialized_value_size, serialized_header_size)
  28. produce_future.add_callback(self._produce_success)
  29. produce_future.add_errback(self.failure)
  30. def _produce_success(self, offset_and_timestamp):
  31. offset, produce_timestamp_ms, log_start_offset = offset_and_timestamp
  32. # Unpacking from args tuple is minor speed optimization
  33. (relative_offset, timestamp_ms, checksum,
  34. serialized_key_size, serialized_value_size, serialized_header_size) = self.args
  35. # None is when Broker does not support the API (<0.10) and
  36. # -1 is when the broker is configured for CREATE_TIME timestamps
  37. if produce_timestamp_ms is not None and produce_timestamp_ms != -1:
  38. timestamp_ms = produce_timestamp_ms
  39. if offset != -1 and relative_offset is not None:
  40. offset += relative_offset
  41. tp = self._produce_future.topic_partition
  42. metadata = RecordMetadata(tp[0], tp[1], tp, offset, timestamp_ms, log_start_offset,
  43. checksum, serialized_key_size,
  44. serialized_value_size, serialized_header_size)
  45. self.success(metadata)
  46. def get(self, timeout=None):
  47. if not self.is_done and not self._produce_future.wait(timeout):
  48. raise Errors.KafkaTimeoutError(
  49. "Timeout after waiting for %s secs." % (timeout,))
  50. assert self.is_done
  51. if self.failed():
  52. raise self.exception # pylint: disable-msg=raising-bad-type
  53. return self.value
  54. RecordMetadata = collections.namedtuple(
  55. 'RecordMetadata', ['topic', 'partition', 'topic_partition', 'offset', 'timestamp', 'log_start_offset',
  56. 'checksum', 'serialized_key_size', 'serialized_value_size', 'serialized_header_size'])