from __future__ import absolute_import, division

import abc
import copy
import logging
import threading
import time
import weakref

from kafka.vendor import six

from kafka.coordinator.heartbeat import Heartbeat
from kafka import errors as Errors
from kafka.future import Future
from kafka.metrics import AnonMeasurable
from kafka.metrics.stats import Avg, Count, Max, Rate
from kafka.protocol.commit import GroupCoordinatorRequest, OffsetCommitRequest
from kafka.protocol.group import (HeartbeatRequest, JoinGroupRequest,
                                  LeaveGroupRequest, SyncGroupRequest)

log = logging.getLogger('kafka.coordinator')


class MemberState(object):
    UNJOINED = '<unjoined>'  # the client is not part of a group
    REBALANCING = '<rebalancing>'  # the client has begun rebalancing
    STABLE = '<stable>'  # the client has joined and is sending heartbeats


class Generation(object):
    def __init__(self, generation_id, member_id, protocol):
        self.generation_id = generation_id
        self.member_id = member_id
        self.protocol = protocol

Generation.NO_GENERATION = Generation(
    OffsetCommitRequest[2].DEFAULT_GENERATION_ID,
    JoinGroupRequest[0].UNKNOWN_MEMBER_ID,
    None)
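# Note: NO_GENERATION is the sentinel used before the member has joined a
# group and after reset_generation() -- the protocol's default generation id
# (-1) and the unknown-member placeholder (an empty member_id string).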


class UnjoinedGroupException(Errors.KafkaError):
    retriable = True


class BaseCoordinator(object):
    """
    BaseCoordinator implements group management for a single group member
    by interacting with a designated Kafka broker (the coordinator). Group
    semantics are provided by extending this class. See ConsumerCoordinator
    for example usage.

    From a high level, Kafka's group management protocol consists of the
    following sequence of actions:

    1. Group Registration: Group members register with the coordinator providing
       their own metadata (such as the set of topics they are interested in).

    2. Group/Leader Selection: The coordinator selects the members of the group
       and chooses one member as the leader.

    3. State Assignment: The leader collects the metadata from all the members
       of the group and assigns state.

    4. Group Stabilization: Each member receives the state assigned by the
       leader and begins processing.

    To leverage this protocol, an implementation must define the format of
    metadata provided by each member for group registration in
    :meth:`.group_protocols` and the format of the state assignment provided by
    the leader in :meth:`._perform_assignment` and which becomes available to
    members in :meth:`._on_join_complete`.

    Note on locking: this class shares state between the caller and a background
    thread which is used for sending heartbeats after the client has joined the
    group. All mutable state as well as state transitions are protected with the
    class's monitor. Generally this means acquiring the lock before reading or
    writing the state of the group (e.g. generation, member_id) and holding the
    lock when sending a request that affects the state of the group
    (e.g. JoinGroup, LeaveGroup).
    """

    DEFAULT_CONFIG = {
        'group_id': 'kafka-python-default-group',
        'session_timeout_ms': 10000,
        'heartbeat_interval_ms': 3000,
        'max_poll_interval_ms': 300000,
        'retry_backoff_ms': 100,
        'api_version': (0, 10, 1),
        'metric_group_prefix': '',
    }
    def __init__(self, client, metrics, **configs):
        """
        Keyword Arguments:
            group_id (str): name of the consumer group to join for dynamic
                partition assignment (if enabled), and to use for fetching and
                committing offsets. Default: 'kafka-python-default-group'
            session_timeout_ms (int): The timeout used to detect failures when
                using Kafka's group management facilities. Default: 10000
            heartbeat_interval_ms (int): The expected time in milliseconds
                between heartbeats to the consumer coordinator when using
                Kafka's group management feature. Heartbeats are used to ensure
                that the consumer's session stays active and to facilitate
                rebalancing when new consumers join or leave the group. The
                value must be set lower than session_timeout_ms, but typically
                should be set no higher than 1/3 of that value. It can be
                adjusted even lower to control the expected time for normal
                rebalances. Default: 3000
            retry_backoff_ms (int): Milliseconds to backoff when retrying on
                errors. Default: 100.
        """
        self.config = copy.copy(self.DEFAULT_CONFIG)
        for key in self.config:
            if key in configs:
                self.config[key] = configs[key]

        if self.config['api_version'] < (0, 10, 1):
            if self.config['max_poll_interval_ms'] != self.config['session_timeout_ms']:
                raise Errors.KafkaConfigurationError("Broker version %s does not support "
                                                     "different values for max_poll_interval_ms "
                                                     "and session_timeout_ms"
                                                     % (self.config['api_version'],))

        self._client = client
        self.group_id = self.config['group_id']
        self.heartbeat = Heartbeat(**self.config)
        self._heartbeat_thread = None
        self._lock = threading.Condition()
        self.rejoin_needed = True
        self.rejoining = False  # renamed / complement of java needsJoinPrepare
        self.state = MemberState.UNJOINED
        self.join_future = None
        self.coordinator_id = None
        self._find_coordinator_future = None
        self._generation = Generation.NO_GENERATION
        self.sensors = GroupCoordinatorMetrics(self.heartbeat, metrics,
                                               self.config['metric_group_prefix'])
    @abc.abstractmethod
    def protocol_type(self):
        """
        Unique identifier for the class of supported protocols
        (e.g. "consumer" or "connect").

        Returns:
            str: protocol type name
        """
        pass

    @abc.abstractmethod
    def group_protocols(self):
        """Return the list of supported group protocols and metadata.

        This list is submitted by each group member via a JoinGroupRequest.
        The order of the protocols in the list indicates the preference of the
        protocol (the first entry is the most preferred). The coordinator takes
        this preference into account when selecting the generation protocol
        (generally more preferred protocols will be selected as long as all
        members support them and there is no disagreement on the preference).

        Note: metadata must be type bytes or support an encode() method

        Returns:
            list: [(protocol, metadata), ...]
        """
        pass
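    # Illustrative sketch only (not part of this module): a consumer-style
    # subclass would typically return one (name, metadata) pair per supported
    # assignment strategy, most preferred first, e.g.
    #     [('range', range_metadata_bytes), ('roundrobin', rr_metadata_bytes)]
    # where each metadata value is bytes or provides an encode() method.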
    @abc.abstractmethod
    def _on_join_prepare(self, generation, member_id):
        """Invoked prior to each group join or rejoin.

        This is typically used to perform any cleanup from the previous
        generation (such as committing offsets for the consumer)

        Arguments:
            generation (int): The previous generation or -1 if there was none
            member_id (str): The identifier of this member in the previous group
                or '' if there was none
        """
        pass

    @abc.abstractmethod
    def _perform_assignment(self, leader_id, protocol, members):
        """Perform assignment for the group.

        This is used by the leader to push state to all the members of the group
        (e.g. to push partition assignments in the case of the new consumer)

        Arguments:
            leader_id (str): The id of the leader (which is this member)
            protocol (str): the chosen group protocol (assignment strategy)
            members (list): [(member_id, metadata_bytes)] from
                JoinGroupResponse. metadata_bytes are associated with the chosen
                group protocol, and the Coordinator subclass is responsible for
                decoding metadata_bytes based on that protocol.

        Returns:
            dict: {member_id: assignment}; assignment must either be bytes
                or have an encode() method to convert to bytes
        """
        pass

    @abc.abstractmethod
    def _on_join_complete(self, generation, member_id, protocol,
                          member_assignment_bytes):
        """Invoked when a group member has successfully joined a group.

        Arguments:
            generation (int): the generation that was joined
            member_id (str): the identifier for the local member in the group
            protocol (str): the protocol selected by the coordinator
            member_assignment_bytes (bytes): the protocol-encoded assignment
                propagated from the group leader. The Coordinator instance is
                responsible for decoding based on the chosen protocol.
        """
        pass
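    # A minimal sketch of how the abstract hooks fit together (illustrative
    # only; the subclass name and metadata values below are hypothetical and
    # not part of kafka-python):
    #
    #     class EchoCoordinator(BaseCoordinator):
    #         def protocol_type(self):
    #             return 'consumer'
    #
    #         def group_protocols(self):
    #             return [('echo', b'my-metadata')]
    #
    #         def _on_join_prepare(self, generation, member_id):
    #             pass  # e.g. commit offsets from the previous generation
    #
    #         def _perform_assignment(self, leader_id, protocol, members):
    #             # leader only: fan the collected metadata back out as the
    #             # per-member assignment
    #             return {member_id: metadata for member_id, metadata in members}
    #
    #         def _on_join_complete(self, generation, member_id, protocol,
    #                               member_assignment_bytes):
    #             pass  # decode member_assignment_bytes and start processing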
    def coordinator_unknown(self):
        """Check if we know who the coordinator is and have an active connection

        Side-effect: reset coordinator_id to None if connection failed

        Returns:
            bool: True if the coordinator is unknown
        """
        return self.coordinator() is None

    def coordinator(self):
        """Get the current coordinator

        Returns: the current coordinator id or None if it is unknown
        """
        if self.coordinator_id is None:
            return None
        elif self._client.is_disconnected(self.coordinator_id):
            self.coordinator_dead('Node Disconnected')
            return None
        else:
            return self.coordinator_id

    def ensure_coordinator_ready(self):
        """Block until the coordinator for this group is known
        (and we have an active connection -- java client uses unsent queue).
        """
        with self._client._lock, self._lock:
            while self.coordinator_unknown():

                # Prior to 0.8.2 there was no group coordinator
                # so we will just pick a node at random and treat
                # it as the "coordinator"
                if self.config['api_version'] < (0, 8, 2):
                    self.coordinator_id = self._client.least_loaded_node()
                    if self.coordinator_id is not None:
                        self._client.maybe_connect(self.coordinator_id)
                    continue

                future = self.lookup_coordinator()
                self._client.poll(future=future)

                if future.failed():
                    if future.retriable():
                        if getattr(future.exception, 'invalid_metadata', False):
                            log.debug('Requesting metadata for group coordinator request: %s', future.exception)
                            metadata_update = self._client.cluster.request_update()
                            self._client.poll(future=metadata_update)
                        else:
                            time.sleep(self.config['retry_backoff_ms'] / 1000)
                    else:
                        raise future.exception  # pylint: disable-msg=raising-bad-type
    def _reset_find_coordinator_future(self, result):
        self._find_coordinator_future = None

    def lookup_coordinator(self):
        with self._lock:
            if self._find_coordinator_future is not None:
                return self._find_coordinator_future

            # If there is an error sending the group coordinator request
            # then _reset_find_coordinator_future will immediately fire and
            # set _find_coordinator_future = None
            # To avoid returning None, we capture the future in a local variable
            future = self._send_group_coordinator_request()
            self._find_coordinator_future = future
            self._find_coordinator_future.add_both(self._reset_find_coordinator_future)
            return future

    def need_rejoin(self):
        """Check whether the group should be rejoined (e.g. if metadata changes)

        Returns:
            bool: True if it should, False otherwise
        """
        return self.rejoin_needed
    def poll_heartbeat(self):
        """
        Check the status of the heartbeat thread (if it is active) and indicate
        the liveness of the client. This must be called periodically after
        joining with :meth:`.ensure_active_group` to ensure that the member stays
        in the group. If an interval of time longer than the provided rebalance
        timeout (max_poll_interval_ms) expires without calling this method, then
        the client will proactively leave the group.

        Raises: RuntimeError for unexpected errors raised from the heartbeat thread
        """
        with self._lock:
            if self._heartbeat_thread is not None:
                if self._heartbeat_thread.failed:
                    # set the heartbeat thread to None and raise an exception.
                    # If the user catches it, the next call to ensure_active_group()
                    # will spawn a new heartbeat thread.
                    cause = self._heartbeat_thread.failed
                    self._heartbeat_thread = None
                    raise cause  # pylint: disable-msg=raising-bad-type

                # Awake the heartbeat thread if needed
                if self.heartbeat.should_heartbeat():
                    self._lock.notify()
            self.heartbeat.poll()

    def time_to_next_heartbeat(self):
        """Returns seconds (float) remaining before next heartbeat should be sent

        Note: Returns infinite if group is not joined
        """
        with self._lock:
            # if we have not joined the group, we don't need to send heartbeats
            if self.state is MemberState.UNJOINED:
                return float('inf')
            return self.heartbeat.time_to_next_heartbeat()
    def _handle_join_success(self, member_assignment_bytes):
        with self._lock:
            log.info("Successfully joined group %s with generation %s",
                     self.group_id, self._generation.generation_id)
            self.state = MemberState.STABLE
            self.rejoin_needed = False
            if self._heartbeat_thread:
                self._heartbeat_thread.enable()

    def _handle_join_failure(self, _):
        with self._lock:
            self.state = MemberState.UNJOINED
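
    # Member state transitions driven by ensure_active_group() (summary, for
    # orientation): UNJOINED -> REBALANCING when a JoinGroup is sent,
    # REBALANCING -> STABLE when the join/sync round trip succeeds, and back
    # to UNJOINED on join failure or reset_generation(). Heartbeats are only
    # sent while STABLE.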
    def ensure_active_group(self):
        """Ensure that the group is active (i.e. joined and synced)"""
        with self._client._lock, self._lock:
            if self._heartbeat_thread is None:
                self._start_heartbeat_thread()

            while self.need_rejoin() or self._rejoin_incomplete():
                self.ensure_coordinator_ready()

                # call on_join_prepare if needed. We set a flag
                # to make sure that we do not call it a second
                # time if the client is woken up before a pending
                # rebalance completes. This must be called on each
                # iteration of the loop because an event requiring
                # a rebalance (such as a metadata refresh which
                # changes the matched subscription set) can occur
                # while another rebalance is still in progress.
                if not self.rejoining:
                    self._on_join_prepare(self._generation.generation_id,
                                          self._generation.member_id)
                    self.rejoining = True

                # ensure that there are no pending requests to the coordinator.
                # This is important in particular to avoid resending a pending
                # JoinGroup request.
                while not self.coordinator_unknown():
                    if not self._client.in_flight_request_count(self.coordinator_id):
                        break
                    self._client.poll()
                else:
                    continue

                # we store the join future in case we are woken up by the user
                # after beginning the rebalance in the call to poll below.
                # This ensures that we do not mistakenly attempt to rejoin
                # before the pending rebalance has completed.
                if self.join_future is None:
                    # Fence off the heartbeat thread explicitly so that it cannot
                    # interfere with the join group. Note that this must come after
                    # the call to _on_join_prepare since we must be able to continue
                    # sending heartbeats if that callback takes some time.
                    self._heartbeat_thread.disable()

                    self.state = MemberState.REBALANCING
                    future = self._send_join_group_request()

                    self.join_future = future  # this should happen before adding callbacks

                    # handle join completion in the callback so that the
                    # callback will be invoked even if the consumer is woken up
                    # before finishing the rebalance
                    future.add_callback(self._handle_join_success)

                    # we handle failures below after the request finishes.
                    # If the join completes after having been woken up, the
                    # exception is ignored and we will rejoin
                    future.add_errback(self._handle_join_failure)

                else:
                    future = self.join_future

                self._client.poll(future=future)

                if future.succeeded():
                    self._on_join_complete(self._generation.generation_id,
                                           self._generation.member_id,
                                           self._generation.protocol,
                                           future.value)
                    self.join_future = None
                    self.rejoining = False

                else:
                    self.join_future = None
                    exception = future.exception
                    if isinstance(exception, (Errors.UnknownMemberIdError,
                                              Errors.RebalanceInProgressError,
                                              Errors.IllegalGenerationError)):
                        continue
                    elif not future.retriable():
                        raise exception  # pylint: disable-msg=raising-bad-type
                    time.sleep(self.config['retry_backoff_ms'] / 1000)

    def _rejoin_incomplete(self):
        return self.join_future is not None
    def _send_join_group_request(self):
        """Join the group and return the assignment for the next generation.

        This function handles both JoinGroup and SyncGroup, delegating to
        :meth:`._perform_assignment` if elected leader by the coordinator.

        Returns:
            Future: resolves to the encoded-bytes assignment returned from the
                group leader
        """
        if self.coordinator_unknown():
            e = Errors.GroupCoordinatorNotAvailableError(self.coordinator_id)
            return Future().failure(e)

        elif not self._client.ready(self.coordinator_id, metadata_priority=False):
            e = Errors.NodeNotReadyError(self.coordinator_id)
            return Future().failure(e)

        # send a join group request to the coordinator
        log.info("(Re-)joining group %s", self.group_id)
        member_metadata = [
            (protocol, metadata if isinstance(metadata, bytes) else metadata.encode())
            for protocol, metadata in self.group_protocols()
        ]
        if self.config['api_version'] < (0, 9):
            raise Errors.KafkaError('JoinGroupRequest api requires 0.9+ brokers')
        elif (0, 9) <= self.config['api_version'] < (0, 10, 1):
            request = JoinGroupRequest[0](
                self.group_id,
                self.config['session_timeout_ms'],
                self._generation.member_id,
                self.protocol_type(),
                member_metadata)
        elif (0, 10, 1) <= self.config['api_version'] < (0, 11, 0):
            request = JoinGroupRequest[1](
                self.group_id,
                self.config['session_timeout_ms'],
                self.config['max_poll_interval_ms'],
                self._generation.member_id,
                self.protocol_type(),
                member_metadata)
        else:
            request = JoinGroupRequest[2](
                self.group_id,
                self.config['session_timeout_ms'],
                self.config['max_poll_interval_ms'],
                self._generation.member_id,
                self.protocol_type(),
                member_metadata)

        # create the request for the coordinator
        log.debug("Sending JoinGroup (%s) to coordinator %s", request, self.coordinator_id)
        future = Future()
        _f = self._client.send(self.coordinator_id, request)
        _f.add_callback(self._handle_join_group_response, future, time.time())
        _f.add_errback(self._failed_request, self.coordinator_id,
                       request, future)
        return future
    def _failed_request(self, node_id, request, future, error):
        # Marking coordinator dead
        # unless the error is caused by internal client pipelining
        if not isinstance(error, (Errors.NodeNotReadyError,
                                  Errors.TooManyInFlightRequests)):
            log.error('Error sending %s to node %s [%s]',
                      request.__class__.__name__, node_id, error)
            self.coordinator_dead(error)
        else:
            log.debug('Error sending %s to node %s [%s]',
                      request.__class__.__name__, node_id, error)
        future.failure(error)
    def _handle_join_group_response(self, future, send_time, response):
        error_type = Errors.for_code(response.error_code)
        if error_type is Errors.NoError:
            log.debug("Received successful JoinGroup response for group %s: %s",
                      self.group_id, response)
            self.sensors.join_latency.record((time.time() - send_time) * 1000)
            with self._lock:
                if self.state is not MemberState.REBALANCING:
                    # if the consumer was woken up before a rebalance completes,
                    # we may have already left the group. In this case, we do
                    # not want to continue with the sync group.
                    future.failure(UnjoinedGroupException())
                else:
                    self._generation = Generation(response.generation_id,
                                                  response.member_id,
                                                  response.group_protocol)

                    if response.leader_id == response.member_id:
                        log.info("Elected group leader -- performing partition"
                                 " assignments using %s", self._generation.protocol)
                        self._on_join_leader(response).chain(future)
                    else:
                        self._on_join_follower().chain(future)

        elif error_type is Errors.GroupLoadInProgressError:
            log.debug("Attempt to join group %s rejected since coordinator %s"
                      " is loading the group.", self.group_id, self.coordinator_id)
            # backoff and retry
            future.failure(error_type(response))
        elif error_type is Errors.UnknownMemberIdError:
            # reset the member id and retry immediately
            error = error_type(self._generation.member_id)
            self.reset_generation()
            log.debug("Attempt to join group %s failed due to unknown member id",
                      self.group_id)
            future.failure(error)
        elif error_type in (Errors.GroupCoordinatorNotAvailableError,
                            Errors.NotCoordinatorForGroupError):
            # re-discover the coordinator and retry with backoff
            self.coordinator_dead(error_type())
            log.debug("Attempt to join group %s failed due to obsolete "
                      "coordinator information: %s", self.group_id,
                      error_type.__name__)
            future.failure(error_type())
        elif error_type in (Errors.InconsistentGroupProtocolError,
                            Errors.InvalidSessionTimeoutError,
                            Errors.InvalidGroupIdError):
            # log the error and re-throw the exception
            error = error_type(response)
            log.error("Attempt to join group %s failed due to fatal error: %s",
                      self.group_id, error)
            future.failure(error)
        elif error_type is Errors.GroupAuthorizationFailedError:
            future.failure(error_type(self.group_id))
        else:
            # unexpected error, throw the exception
            error = error_type()
            log.error("Unexpected error in join group response: %s", error)
            future.failure(error)
    def _on_join_follower(self):
        # send follower's sync group with an empty assignment
        version = 0 if self.config['api_version'] < (0, 11, 0) else 1
        request = SyncGroupRequest[version](
            self.group_id,
            self._generation.generation_id,
            self._generation.member_id,
            {})
        log.debug("Sending follower SyncGroup for group %s to coordinator %s: %s",
                  self.group_id, self.coordinator_id, request)
        return self._send_sync_group_request(request)

    def _on_join_leader(self, response):
        """
        Perform leader synchronization and send back the assignment
        for the group via SyncGroupRequest

        Arguments:
            response (JoinResponse): broker response to parse

        Returns:
            Future: resolves to member assignment encoded-bytes
        """
        try:
            group_assignment = self._perform_assignment(response.leader_id,
                                                        response.group_protocol,
                                                        response.members)
        except Exception as e:
            return Future().failure(e)

        version = 0 if self.config['api_version'] < (0, 11, 0) else 1
        request = SyncGroupRequest[version](
            self.group_id,
            self._generation.generation_id,
            self._generation.member_id,
            [(member_id,
              assignment if isinstance(assignment, bytes) else assignment.encode())
             for member_id, assignment in six.iteritems(group_assignment)])
        log.debug("Sending leader SyncGroup for group %s to coordinator %s: %s",
                  self.group_id, self.coordinator_id, request)
        return self._send_sync_group_request(request)
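
    # Both join paths converge on SyncGroup: followers send an empty
    # assignment, the leader sends the full {member_id: assignment} map, and
    # the coordinator hands each member only its own assignment bytes back in
    # the SyncGroup response.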
    def _send_sync_group_request(self, request):
        if self.coordinator_unknown():
            e = Errors.GroupCoordinatorNotAvailableError(self.coordinator_id)
            return Future().failure(e)

        # We assume that coordinator is ready if we're sending SyncGroup
        # as it typically follows a successful JoinGroup
        # Also note that if client.ready() enforces a metadata priority policy,
        # we can get into an infinite loop if the leader assignment process
        # itself requests a metadata update

        future = Future()
        _f = self._client.send(self.coordinator_id, request)
        _f.add_callback(self._handle_sync_group_response, future, time.time())
        _f.add_errback(self._failed_request, self.coordinator_id,
                       request, future)
        return future

    def _handle_sync_group_response(self, future, send_time, response):
        error_type = Errors.for_code(response.error_code)
        if error_type is Errors.NoError:
            self.sensors.sync_latency.record((time.time() - send_time) * 1000)
            future.success(response.member_assignment)
            return

        # Always rejoin on error
        self.request_rejoin()
        if error_type is Errors.GroupAuthorizationFailedError:
            future.failure(error_type(self.group_id))
        elif error_type is Errors.RebalanceInProgressError:
            log.debug("SyncGroup for group %s failed due to coordinator"
                      " rebalance", self.group_id)
            future.failure(error_type(self.group_id))
        elif error_type in (Errors.UnknownMemberIdError,
                            Errors.IllegalGenerationError):
            error = error_type()
            log.debug("SyncGroup for group %s failed due to %s", self.group_id, error)
            self.reset_generation()
            future.failure(error)
        elif error_type in (Errors.GroupCoordinatorNotAvailableError,
                            Errors.NotCoordinatorForGroupError):
            error = error_type()
            log.debug("SyncGroup for group %s failed due to %s", self.group_id, error)
            self.coordinator_dead(error)
            future.failure(error)
        else:
            error = error_type()
            log.error("Unexpected error from SyncGroup: %s", error)
            future.failure(error)
    def _send_group_coordinator_request(self):
        """Discover the current coordinator for the group.

        Returns:
            Future: resolves to the node id of the coordinator
        """
        node_id = self._client.least_loaded_node()
        if node_id is None:
            return Future().failure(Errors.NoBrokersAvailable())

        elif not self._client.ready(node_id, metadata_priority=False):
            e = Errors.NodeNotReadyError(node_id)
            return Future().failure(e)

        log.debug("Sending group coordinator request for group %s to broker %s",
                  self.group_id, node_id)
        request = GroupCoordinatorRequest[0](self.group_id)
        future = Future()
        _f = self._client.send(node_id, request)
        _f.add_callback(self._handle_group_coordinator_response, future)
        _f.add_errback(self._failed_request, node_id, request, future)
        return future

    def _handle_group_coordinator_response(self, future, response):
        log.debug("Received group coordinator response %s", response)

        error_type = Errors.for_code(response.error_code)
        if error_type is Errors.NoError:
            with self._lock:
                coordinator_id = self._client.cluster.add_group_coordinator(self.group_id, response)
                if not coordinator_id:
                    # This could happen if coordinator metadata is different
                    # than broker metadata
                    future.failure(Errors.IllegalStateError())
                    return

                self.coordinator_id = coordinator_id
                log.info("Discovered coordinator %s for group %s",
                         self.coordinator_id, self.group_id)
                self._client.maybe_connect(self.coordinator_id)
                self.heartbeat.reset_timeouts()
            future.success(self.coordinator_id)

        elif error_type is Errors.GroupCoordinatorNotAvailableError:
            log.debug("Group Coordinator Not Available; retry")
            future.failure(error_type())
        elif error_type is Errors.GroupAuthorizationFailedError:
            error = error_type(self.group_id)
            log.error("Group Coordinator Request failed: %s", error)
            future.failure(error)
        else:
            error = error_type()
            log.error("Group coordinator lookup for group %s failed: %s",
                      self.group_id, error)
            future.failure(error)
    def coordinator_dead(self, error):
        """Mark the current coordinator as dead."""
        if self.coordinator_id is not None:
            log.warning("Marking the coordinator dead (node %s) for group %s: %s.",
                        self.coordinator_id, self.group_id, error)
            self.coordinator_id = None

    def generation(self):
        """Get the current generation state if the group is stable.

        Returns: the current generation or None if the group is unjoined/rebalancing
        """
        with self._lock:
            if self.state is not MemberState.STABLE:
                return None
            return self._generation

    def reset_generation(self):
        """Reset the generation and memberId because we have fallen out of the group."""
        with self._lock:
            self._generation = Generation.NO_GENERATION
            self.rejoin_needed = True
            self.state = MemberState.UNJOINED

    def request_rejoin(self):
        self.rejoin_needed = True
    def _start_heartbeat_thread(self):
        if self._heartbeat_thread is None:
            log.info('Starting new heartbeat thread')
            self._heartbeat_thread = HeartbeatThread(weakref.proxy(self))
            self._heartbeat_thread.daemon = True
            self._heartbeat_thread.start()

    def _close_heartbeat_thread(self):
        if self._heartbeat_thread is not None:
            log.info('Stopping heartbeat thread')
            try:
                self._heartbeat_thread.close()
            except ReferenceError:
                pass
            self._heartbeat_thread = None

    def __del__(self):
        self._close_heartbeat_thread()

    def close(self):
        """Close the coordinator, leave the current group,
        and reset local generation / member_id"""
        self._close_heartbeat_thread()
        self.maybe_leave_group()
    def maybe_leave_group(self):
        """Leave the current group and reset local generation/memberId."""
        with self._client._lock, self._lock:
            if (not self.coordinator_unknown()
                and self.state is not MemberState.UNJOINED
                and self._generation is not Generation.NO_GENERATION):

                # this is a minimal effort attempt to leave the group. we do not
                # attempt any resending if the request fails or times out.
                log.info('Leaving consumer group (%s).', self.group_id)
                version = 0 if self.config['api_version'] < (0, 11, 0) else 1
                request = LeaveGroupRequest[version](self.group_id, self._generation.member_id)
                future = self._client.send(self.coordinator_id, request)
                future.add_callback(self._handle_leave_group_response)
                future.add_errback(log.error, "LeaveGroup request failed: %s")
                self._client.poll(future=future)

            self.reset_generation()

    def _handle_leave_group_response(self, response):
        error_type = Errors.for_code(response.error_code)
        if error_type is Errors.NoError:
            log.debug("LeaveGroup request for group %s returned successfully",
                      self.group_id)
        else:
            log.error("LeaveGroup request for group %s failed with error: %s",
                      self.group_id, error_type())
    def _send_heartbeat_request(self):
        """Send a heartbeat request"""
        if self.coordinator_unknown():
            e = Errors.GroupCoordinatorNotAvailableError(self.coordinator_id)
            return Future().failure(e)

        elif not self._client.ready(self.coordinator_id, metadata_priority=False):
            e = Errors.NodeNotReadyError(self.coordinator_id)
            return Future().failure(e)

        version = 0 if self.config['api_version'] < (0, 11, 0) else 1
        request = HeartbeatRequest[version](self.group_id,
                                            self._generation.generation_id,
                                            self._generation.member_id)
        log.debug("Heartbeat: %s[%s] %s", request.group, request.generation_id, request.member_id)  # pylint: disable-msg=no-member
        future = Future()
        _f = self._client.send(self.coordinator_id, request)
        _f.add_callback(self._handle_heartbeat_response, future, time.time())
        _f.add_errback(self._failed_request, self.coordinator_id,
                       request, future)
        return future

    def _handle_heartbeat_response(self, future, send_time, response):
        self.sensors.heartbeat_latency.record((time.time() - send_time) * 1000)
        error_type = Errors.for_code(response.error_code)
        if error_type is Errors.NoError:
            log.debug("Received successful heartbeat response for group %s",
                      self.group_id)
            future.success(None)
        elif error_type in (Errors.GroupCoordinatorNotAvailableError,
                            Errors.NotCoordinatorForGroupError):
            log.warning("Heartbeat failed for group %s: coordinator (node %s)"
                        " is either not started or not valid", self.group_id,
                        self.coordinator())
            self.coordinator_dead(error_type())
            future.failure(error_type())
        elif error_type is Errors.RebalanceInProgressError:
            log.warning("Heartbeat failed for group %s because it is"
                        " rebalancing", self.group_id)
            self.request_rejoin()
            future.failure(error_type())
        elif error_type is Errors.IllegalGenerationError:
            log.warning("Heartbeat failed for group %s: generation id is not"
                        " current.", self.group_id)
            self.reset_generation()
            future.failure(error_type())
        elif error_type is Errors.UnknownMemberIdError:
            log.warning("Heartbeat: local member_id was not recognized;"
                        " this consumer needs to re-join")
            self.reset_generation()
            future.failure(error_type)
        elif error_type is Errors.GroupAuthorizationFailedError:
            error = error_type(self.group_id)
            log.error("Heartbeat failed: authorization error: %s", error)
            future.failure(error)
        else:
            error = error_type()
            log.error("Heartbeat failed: Unhandled error: %s", error)
            future.failure(error)
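
    # Heartbeat error handling in summary: coordinator-related errors mark the
    # coordinator dead (forcing rediscovery), RebalanceInProgress requests a
    # rejoin, and IllegalGeneration/UnknownMemberId reset the local generation
    # so the member rejoins with a fresh member_id.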


class GroupCoordinatorMetrics(object):
    def __init__(self, heartbeat, metrics, prefix, tags=None):
        self.heartbeat = heartbeat
        self.metrics = metrics
        self.metric_group_name = prefix + "-coordinator-metrics"

        self.heartbeat_latency = metrics.sensor('heartbeat-latency')
        self.heartbeat_latency.add(metrics.metric_name(
            'heartbeat-response-time-max', self.metric_group_name,
            'The max time taken to receive a response to a heartbeat request',
            tags), Max())
        self.heartbeat_latency.add(metrics.metric_name(
            'heartbeat-rate', self.metric_group_name,
            'The average number of heartbeats per second',
            tags), Rate(sampled_stat=Count()))

        self.join_latency = metrics.sensor('join-latency')
        self.join_latency.add(metrics.metric_name(
            'join-time-avg', self.metric_group_name,
            'The average time taken for a group rejoin',
            tags), Avg())
        self.join_latency.add(metrics.metric_name(
            'join-time-max', self.metric_group_name,
            'The max time taken for a group rejoin',
            tags), Max())
        self.join_latency.add(metrics.metric_name(
            'join-rate', self.metric_group_name,
            'The number of group joins per second',
            tags), Rate(sampled_stat=Count()))

        self.sync_latency = metrics.sensor('sync-latency')
        self.sync_latency.add(metrics.metric_name(
            'sync-time-avg', self.metric_group_name,
            'The average time taken for a group sync',
            tags), Avg())
        self.sync_latency.add(metrics.metric_name(
            'sync-time-max', self.metric_group_name,
            'The max time taken for a group sync',
            tags), Max())
        self.sync_latency.add(metrics.metric_name(
            'sync-rate', self.metric_group_name,
            'The number of group syncs per second',
            tags), Rate(sampled_stat=Count()))

        metrics.add_metric(metrics.metric_name(
            'last-heartbeat-seconds-ago', self.metric_group_name,
            'The number of seconds since the last coordinator heartbeat was sent',
            tags), AnonMeasurable(
                lambda _, now: (now / 1000) - self.heartbeat.last_send))
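

# Design note: the heartbeat thread below is a daemon thread that holds the
# coordinator only through a weakref proxy, so a garbage-collected coordinator
# ends the thread. It shares the coordinator's Condition lock, sends
# heartbeats only while the member state is STABLE, and is explicitly
# disabled around JoinGroup so it cannot race with a rebalance in progress.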
class HeartbeatThread(threading.Thread):
    def __init__(self, coordinator):
        super(HeartbeatThread, self).__init__()
        self.name = coordinator.group_id + '-heartbeat'
        self.coordinator = coordinator
        self.enabled = False
        self.closed = False
        self.failed = None

    def enable(self):
        with self.coordinator._lock:
            self.enabled = True
            self.coordinator.heartbeat.reset_timeouts()
            self.coordinator._lock.notify()

    def disable(self):
        self.enabled = False

    def close(self):
        self.closed = True
        with self.coordinator._lock:
            self.coordinator._lock.notify()
        if self.is_alive():
            self.join(self.coordinator.config['heartbeat_interval_ms'] / 1000)
            if self.is_alive():
                log.warning("Heartbeat thread did not fully terminate during close")
    def run(self):
        try:
            log.debug('Heartbeat thread started')
            while not self.closed:
                self._run_once()
        except ReferenceError:
            log.debug('Heartbeat thread closed due to coordinator gc')
        except RuntimeError as e:
            log.error("Heartbeat thread for group %s failed due to unexpected error: %s",
                      self.coordinator.group_id, e)
            self.failed = e
        finally:
            log.debug('Heartbeat thread closed')
    def _run_once(self):
        with self.coordinator._client._lock, self.coordinator._lock:
            if self.enabled and self.coordinator.state is MemberState.STABLE:
                # TODO: When consumer.wakeup() is implemented, we need to
                # disable here to prevent propagating an exception to this
                # heartbeat thread
                # must get client._lock, or maybe deadlock at heartbeat
                # failure callback in consumer poll
                self.coordinator._client.poll(timeout_ms=0)

        with self.coordinator._lock:
            if not self.enabled:
                log.debug('Heartbeat disabled. Waiting')
                self.coordinator._lock.wait()
                log.debug('Heartbeat re-enabled.')
                return

            if self.coordinator.state is not MemberState.STABLE:
                # the group is not stable (perhaps because we left the
                # group or because the coordinator kicked us out), so
                # disable heartbeats and wait for the main thread to rejoin.
                log.debug('Group state is not stable, disabling heartbeats')
                self.disable()
                return

            if self.coordinator.coordinator_unknown():
                future = self.coordinator.lookup_coordinator()
                if not future.is_done or future.failed():
                    # the immediate future check ensures that we backoff
                    # properly in the case that no brokers are available
                    # to connect to (and the future is automatically failed).
                    self.coordinator._lock.wait(self.coordinator.config['retry_backoff_ms'] / 1000)

            elif self.coordinator.heartbeat.session_timeout_expired():
                # the session timeout has expired without seeing a
                # successful heartbeat, so we should probably make sure
                # the coordinator is still healthy.
                log.warning('Heartbeat session expired, marking coordinator dead')
                self.coordinator.coordinator_dead('Heartbeat session expired')

            elif self.coordinator.heartbeat.poll_timeout_expired():
                # the poll timeout has expired, which means that the
                # foreground thread has stalled in between calls to
                # poll(), so we explicitly leave the group.
                log.warning('Heartbeat poll expired, leaving group')
                self.coordinator.maybe_leave_group()

            elif not self.coordinator.heartbeat.should_heartbeat():
                # poll again after waiting for the retry backoff in case
                # the heartbeat failed or the coordinator disconnected
                log.log(0, 'Not ready to heartbeat, waiting')
                self.coordinator._lock.wait(self.coordinator.config['retry_backoff_ms'] / 1000)

            else:
                self.coordinator.heartbeat.sent_heartbeat()
                future = self.coordinator._send_heartbeat_request()
                future.add_callback(self._handle_heartbeat_success)
                future.add_errback(self._handle_heartbeat_failure)
    def _handle_heartbeat_success(self, result):
        with self.coordinator._lock:
            self.coordinator.heartbeat.received_heartbeat()

    def _handle_heartbeat_failure(self, exception):
        with self.coordinator._lock:
            if isinstance(exception, Errors.RebalanceInProgressError):
                # it is valid to continue heartbeating while the group is
                # rebalancing. This ensures that the coordinator keeps the
                # member in the group for as long as the duration of the
                # rebalance timeout. If we stop sending heartbeats, however,
                # then the session timeout may expire before we can rejoin.
                self.coordinator.heartbeat.received_heartbeat()
            else:
                self.coordinator.heartbeat.fail_heartbeat()

                # wake up the thread if it's sleeping to reschedule the heartbeat
                self.coordinator._lock.notify()
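

# Illustrative usage sketch (comments only, not part of this module): a
# subclass-driven poll loop would typically look something like
#
#     coordinator.ensure_coordinator_ready()
#     coordinator.ensure_active_group()      # join + sync, starts heartbeat thread
#     while processing:
#         coordinator.poll_heartbeat()       # surfaces heartbeat-thread errors
#         ...                                # do work within max_poll_interval_ms
#     coordinator.close()                    # leave group, stop heartbeat thread
#
# time_to_next_heartbeat() can be used by the caller to bound its poll timeouts.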