@@ -106,34 +106,34 @@ def sync(sync_ttl, node_status_ttl):
         if dc.detected_cluster_id and \
                 dc.detected_cluster_id != sds_details.get(
                     'detected_cluster_id'):
-
             # Gluster peer list has changed
             integration_id = \
                 NS.tendrl_context.integration_id
             etcd_utils.write(
                 integration_index_key,
                 integration_id
             )
-            # Set the cluster status as new peer detected
             _cluster = NS.tendrl.objects.Cluster(
                 integration_id=integration_id
             ).load()
-            _cluster.status = "new_peers_detected"
-            _cluster.save()
-            # Raise an alert regarding the same
-            msg = "New peers identified in cluster: %s. " \
-                "Make sure tendrl-ansible is executed " \
-                "for the new nodes so that expand " \
-                "cluster option can be triggered" % \
-                _cluster.short_name
-            event_utils.emit_event(
-                "cluster_status",
-                "new_peers_detected",
-                msg,
-                "cluster_{0}".format(integration_id),
-                "WARNING",
-                integration_id=integration_id
-            )
+            if _cluster.is_managed == "yes":
+                # Set the cluster status as new peer detected
+                _cluster.status = "new_peers_detected"
+                _cluster.save()
+                # Raise an alert regarding the same
+                msg = "New peers identified in cluster: %s. " \
+                    "Make sure tendrl-ansible is executed " \
+                    "for the new nodes so that expand " \
+                    "cluster option can be triggered" % \
+                    _cluster.short_name
+                event_utils.emit_event(
+                    "cluster_status",
+                    "new_peers_detected",
+                    msg,
+                    "cluster_{0}".format(integration_id),
+                    "WARNING",
+                    integration_id=integration_id
+                )
         _cluster = NS.tendrl.objects.Cluster(
             integration_id=NS.tendrl_context.integration_id
         ).load()
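
Note on the hunk above: the status flip and the WARNING event are now emitted only when the loaded cluster reports is_managed == "yes"; for an unmanaged cluster the integration-id index is still refreshed, but no alert is raised. A minimal standalone sketch of that gate follows, using a plain dict and a callable in place of the real NS.tendrl.objects.Cluster and event_utils.emit_event (the function name and dict layout are illustrative, not part of the change):

def handle_peer_list_change(cluster, emit_event):
    """Flip status and raise the alert only when the cluster is managed.

    `cluster` is a plain dict standing in for the loaded Cluster object;
    `emit_event` is any callable shaped like event_utils.emit_event above.
    """
    if cluster.get("is_managed") != "yes":
        # Unmanaged (not yet imported) clusters: no status change, no alert.
        return False
    cluster["status"] = "new_peers_detected"
    emit_event(
        "cluster_status",
        "new_peers_detected",
        "New peers identified in cluster: %s. Make sure tendrl-ansible "
        "is executed for the new nodes so that expand cluster option "
        "can be triggered" % cluster["short_name"],
        "cluster_{0}".format(cluster["integration_id"]),
        "WARNING",
        integration_id=cluster["integration_id"],
    )
    return True


if __name__ == "__main__":
    events = []
    cluster = {"is_managed": "yes", "short_name": "c1",
               "integration_id": "abc-123", "status": "healthy"}
    handle_peer_list_change(
        cluster, lambda *args, **kwargs: events.append((args, kwargs)))
    assert cluster["status"] == "new_peers_detected" and len(events) == 1
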
@@ -192,6 +192,17 @@ def sync(sync_ttl, node_status_ttl):
             if integration_id:
                 break
         except etcd.EtcdKeyNotFound:
+            if NS.tendrl_context.integration_id:
+                # If the node already has an integration_id
+                # and the provisioner tag is not claimed by
+                # anyone, then return back to claim it
+                try:
+                    etcd_utils.read(
+                        "indexes/tags/provisioner/%s" %
+                        NS.tendrl_context.integration_id
+                    )
+                except etcd.EtcdKeyNotFound:
+                    return
             loop_count += 1
             continue

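The added block above changes the failure path of the wait loop: when the integration-id index is missing but the node's local tendrl_context already carries an integration_id, it also checks the provisioner index key, and if no node holds it, it returns early so the provisioner-claim path can run again. A rough standalone sketch of that decision, with a dict-backed stub standing in for etcd_utils and etcd.EtcdKeyNotFound (the stub names here are illustrative):

class KeyNotFound(Exception):
    """Stand-in for etcd.EtcdKeyNotFound."""


class FakeEtcd(object):
    """Dict-backed stand-in for etcd_utils in the hunk above."""

    def __init__(self, data=None):
        self.data = dict(data or {})

    def read(self, key):
        if key not in self.data:
            raise KeyNotFound(key)
        return self.data[key]


def should_stop_waiting(store, local_integration_id):
    """Return True when the node should leave the wait loop early
    so it can go and claim the provisioner tag itself."""
    if not local_integration_id:
        return False
    try:
        store.read("indexes/tags/provisioner/%s" % local_integration_id)
    except KeyNotFound:
        # Nobody holds the provisioner tag yet.
        return True
    return False


if __name__ == "__main__":
    empty = FakeEtcd()
    claimed = FakeEtcd({"indexes/tags/provisioner/abc-123": "node-1"})
    assert should_stop_waiting(empty, "abc-123") is True
    assert should_stop_waiting(claimed, "abc-123") is False
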
@@ -212,6 +223,32 @@ def sync(sync_ttl, node_status_ttl):
         detected_cluster_tag = "detected_cluster/%s" % \
             sds_details[
                 'detected_cluster_id']
+        # Detected cluster id will change when a new node is
+        # added to the peer list or when a peer detach happens;
+        # node_context should not maintain multiple detected
+        # cluster ids
+        old_dc_id = "detected_cluster/%s" % dc.detected_cluster_id
+        if old_dc_id in NS.node_context.tags and \
+                old_dc_id != detected_cluster_tag:
+            NS.node_context.tags.remove(old_dc_id)
+            # remove the old detected_cluster_id from indexes
+            indexes_keys = []
+            indexes_keys.append(
+                "indexes/detected_cluster_id_to_integration_id/%s" %
+                dc.detected_cluster_id
+            )
+            indexes_keys.append(
+                "indexes/tags/detected_cluster/%s" %
+                dc.detected_cluster_id
+            )
+            for indexes_key in indexes_keys:
+                try:
+                    etcd_utils.delete(
+                        indexes_key
+                    )
+                except etcd.EtcdKeyNotFound:
+                    # It may already be removed by another
+                    # node in the same cluster
+                    pass
         NS.node_context.tags += [detected_cluster_tag,
                                  integration_tag]
         NS.node_context.tags = list(set(NS.node_context.tags))
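
The cleanup introduced above keeps node_context.tags down to a single detected_cluster/<id> entry and deletes the index keys that pointed at the old id, tolerating keys already removed by a peer node. A simplified standalone version of the same bookkeeping, using a dict in place of the etcd index (the function name and dict store are illustrative; the key formats are taken from the hunk):

def replace_detected_cluster_tag(tags, index_store, old_id, new_id):
    """Swap detected_cluster/<old_id> for detected_cluster/<new_id> in the
    tag list and drop the stale index entries, ignoring keys that another
    node in the same cluster may have removed already."""
    old_tag = "detected_cluster/%s" % old_id
    new_tag = "detected_cluster/%s" % new_id
    if old_tag in tags and old_tag != new_tag:
        tags.remove(old_tag)
        for key in (
            "indexes/detected_cluster_id_to_integration_id/%s" % old_id,
            "indexes/tags/detected_cluster/%s" % old_id,
        ):
            # pop() instead of del, so a key already cleaned up by a peer
            # node is not an error, mirroring the EtcdKeyNotFound pass.
            index_store.pop(key, None)
    if new_tag not in tags:
        tags.append(new_tag)
    return tags


if __name__ == "__main__":
    tags = ["tendrl/node", "detected_cluster/old-id"]
    store = {"indexes/tags/detected_cluster/old-id": "node-1"}
    replace_detected_cluster_tag(tags, store, "old-id", "new-id")
    assert tags == ["tendrl/node", "detected_cluster/new-id"]
    assert store == {}
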
@@ -245,6 +282,34 @@ def sync(sync_ttl, node_status_ttl):
                     )
                 )
                 break
+            else:
+                # If the detected cluster id is not present, the node
+                # is detached from the peer list or glusterd is down;
+                # in both cases remove the provisioner tag
+                _ptag = "provisioner/%s" % \
+                    NS.tendrl_context.integration_id
+                if _ptag in NS.node_context.tags:
+                    NS.node_context.tags.remove(_ptag)
+                    _index_key = "/indexes/tags/%s" % _ptag
+                    try:
+                        etcd_utils.delete(_index_key)
+                    except etcd.EtcdKeyNotFound:
+                        pass
+                dc = NS.tendrl.objects.DetectedCluster().load()
+                dc_id = "detected_cluster/%s" % dc.detected_cluster_id
+                if dc_id in NS.node_context.tags:
+                    # Remove the detected cluster id tag so that the
+                    # provisioner tag is assigned only to nodes which
+                    # have a detected_cluster_id
+                    NS.node_context.tags.remove(dc_id)
+                if NS.tendrl_context.integration_id:
+                    integration_tag = "tendrl/integration/%s" % \
+                        NS.tendrl_context.integration_id
+                    if integration_tag in NS.node_context.tags:
+                        # A node with glusterd down or detached from the
+                        # peer list should not pick up any cluster
+                        # related parent jobs.
+                        NS.node_context.tags.remove(integration_tag)
+                NS.node_context.save()
     except Exception as ex:
         Event(
             ExceptionMessage(
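
The new else branch above demotes a node once gluster no longer reports a detected cluster id (peer detached or glusterd down): the provisioner, detected_cluster and tendrl/integration tags are all dropped so the node stops picking up cluster-scoped jobs. A small, purely illustrative sketch of that demotion as a function over the tag list (the tag formats come from the hunk; the function name and everything else here are assumptions):

def demote_detached_node(tags, integration_id, detected_cluster_id):
    """Return the tags a node should keep once gluster reports no
    detected cluster id (peer detached or glusterd down)."""
    to_drop = {
        "provisioner/%s" % integration_id,
        "detected_cluster/%s" % detected_cluster_id,
        "tendrl/integration/%s" % integration_id,
    }
    # Keep everything that is not cluster-scoped; node-level tags such as
    # "tendrl/node" survive so the node agent itself keeps working.
    return [t for t in tags if t not in to_drop]


if __name__ == "__main__":
    tags = ["tendrl/node",
            "provisioner/abc-123",
            "detected_cluster/dc-1",
            "tendrl/integration/abc-123"]
    assert demote_detached_node(tags, "abc-123", "dc-1") == ["tendrl/node"]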