@@ -86,7 +86,8 @@ def __init__(self, client, subscription, metrics, metric_group_prefix,
        assert self.config['assignors'], 'Coordinator requires assignors'

        self._subscription = subscription
-        self._partitions_per_topic = {}
+        self._metadata_snapshot = {}
+        self._assignment_snapshot = None
        self._cluster = client.cluster
        self._cluster.request_update()
        self._cluster.add_listener(WeakMethod(self._handle_metadata_update))
@@ -150,7 +151,7 @@ def _handle_metadata_update(self, cluster):

        # check if there are any changes to the metadata which should trigger
        # a rebalance
-        if self._subscription_metadata_changed():
+        if self._subscription_metadata_changed(cluster):

            if (self.config['api_version'] >= (0, 9)
                    and self.config['group_id'] is not None):
@@ -163,20 +164,20 @@ def _handle_metadata_update(self, cluster):
                self._subscription.assign_from_subscribed([
                    TopicPartition(topic, partition)
                    for topic in self._subscription.subscription
-                    for partition in self._partitions_per_topic[topic]
+                    for partition in self._metadata_snapshot[topic]
                ])

-    def _subscription_metadata_changed(self):
+    def _subscription_metadata_changed(self, cluster):
        if not self._subscription.partitions_auto_assigned():
            return False

-        old_partitions_per_topic = self._partitions_per_topic
-        self._partitions_per_topic = {}
+        metadata_snapshot = {}
        for topic in self._subscription.group_subscription():
-            partitions = self._cluster.partitions_for_topic(topic) or []
-            self._partitions_per_topic[topic] = set(partitions)
+            partitions = cluster.partitions_for_topic(topic) or []
+            metadata_snapshot[topic] = set(partitions)

-        if self._partitions_per_topic != old_partitions_per_topic:
+        if self._metadata_snapshot != metadata_snapshot:
+            self._metadata_snapshot = metadata_snapshot
            return True
        return False
@@ -188,8 +189,15 @@ def _lookup_assignor(self, name):

    def _on_join_complete(self, generation, member_id, protocol,
                          member_assignment_bytes):
+        # if we were the assignor, then we need to make sure that there have
+        # been no metadata updates since the rebalance begin. Otherwise, we
+        # won't rebalance again until the next metadata change
+        if self._assignment_snapshot and self._assignment_snapshot != self._metadata_snapshot:
+            self._subscription.mark_for_reassignment()
+            return
+
        assignor = self._lookup_assignor(protocol)
-        assert assignor, 'invalid assignment protocol: %s' % protocol
+        assert assignor, 'Coordinator selected invalid assignment protocol: %s' % protocol

        assignment = ConsumerProtocol.ASSIGNMENT.decode(member_assignment_bytes)
@@ -239,6 +247,11 @@ def _perform_assignment(self, leader_id, assignment_strategy, members):
        self._subscription.group_subscribe(all_subscribed_topics)
        self._client.set_topics(self._subscription.group_subscription())

+        # keep track of the metadata used for assignment so that we can check
+        # after rebalance completion whether anything has changed
+        self._cluster.request_update()
+        self._assignment_snapshot = self._metadata_snapshot
+
        log.debug("Performing assignment for group %s using strategy %s"
                  " with subscriptions %s", self.group_id, assignor.name,
                  member_metadata)
@@ -268,6 +281,7 @@ def _on_join_prepare(self, generation, member_id):
                              " for group %s failed on_partitions_revoked",
                              self._subscription.listener, self.group_id)

+        self._assignment_snapshot = None
        self._subscription.mark_for_reassignment()

    def need_rejoin(self):
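For reference, here is a minimal standalone sketch (not part of the patch) of the snapshot comparison the diff relies on: build a {topic: set(partitions)} map from cluster metadata, then compare it with the previously stored map, and treat any difference as a trigger for rebalance. The same dict comparison is what _on_join_complete uses against _assignment_snapshot to detect metadata changes that raced with an in-flight rebalance. FakeCluster and metadata_snapshot below are hypothetical helpers, not kafka-python APIs; only the dict-of-sets shape mirrors _metadata_snapshot above.

# hypothetical stand-in for the ClusterMetadata object passed to
# _handle_metadata_update(); only partitions_for_topic() is modeled
class FakeCluster(object):
    def __init__(self, partitions_by_topic):
        self._partitions = partitions_by_topic

    def partitions_for_topic(self, topic):
        # mirrors the real call's behavior of returning None for unknown topics
        return self._partitions.get(topic)


def metadata_snapshot(cluster, topics):
    # same shape as the coordinator's _metadata_snapshot: {topic: set(partitions)}
    return {topic: set(cluster.partitions_for_topic(topic) or [])
            for topic in topics}


old = metadata_snapshot(FakeCluster({'orders': {0, 1}}), ['orders'])
new = metadata_snapshot(FakeCluster({'orders': {0, 1, 2}}), ['orders'])
assert old != new  # a partition appeared, so a rebalance should be triggered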