@@ -35,20 +35,26 @@ def _derive_cluster_id(self):
             gfs_peer_data[peer[0]] = {"connected": peer[-1],
                                       "hostname": peer[-2]}
 
-        gfs_peers_uuid.sort()
-        return (hashlib.sha256("".join(gfs_peers_uuid)).hexdigest(),
-                gfs_peer_data)
+        # No need to display a one-node cluster
+        if len(gfs_peers_uuid) != 1:
+            gfs_peers_uuid.sort()
+            return (hashlib.sha256("".join(gfs_peers_uuid)).hexdigest(),
+                    gfs_peer_data)
+        else:
+            return "", {}
 
     def discover_storage_system(self):
         ret_val = {}
 
         # get the gluster version details
         # form the temporary cluster_id
         cluster_id, gfs_peers = self._derive_cluster_id()
-        ret_val['detected_cluster_id'] = cluster_id
-        ret_val['detected_cluster_name'] = "gluster-%s" % cluster_id
-        ret_val['peers'] = gfs_peers
+        if cluster_id:
+            ret_val['detected_cluster_id'] = cluster_id
+            ret_val['detected_cluster_name'] = "gluster-%s" % cluster_id
+            ret_val['peers'] = gfs_peers
+        else:
+            return ret_val
         # Check if the file /usr/share/glusterfs/release exists.
         # If it exists, read the version details from it
         if os.path.exists('/usr/share/glusterfs/release'):
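For context, here is a minimal standalone sketch of the id-derivation scheme this hunk guards: the cluster id is a SHA-256 digest over the sorted peer UUIDs, so every peer derives the same id regardless of listing order. The function below is an illustration simplified to return only the id (the plugin also returns peer data), and on Python 3 the joined string must be encoded before hashing, whereas the patched code targets Python 2.

```python
import hashlib


def derive_cluster_id(gfs_peers_uuid):
    # A one-node "cluster" yields no id, matching the new guard above
    if len(gfs_peers_uuid) == 1:
        return ""
    # Sorting makes the digest order-independent, so every peer
    # computes the same cluster id from the same peer set
    joined = "".join(sorted(gfs_peers_uuid))
    return hashlib.sha256(joined.encode("utf-8")).hexdigest()


# The id is stable under peer-listing order:
assert derive_cluster_id(["uuid-b", "uuid-a"]) == \
    derive_cluster_id(["uuid-a", "uuid-b"])
```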
101 changes: 83 additions & 18 deletions tendrl/node_agent/node_sync/sds_detect.py
@@ -106,34 +106,34 @@ def sync(sync_ttl, node_status_ttl):
                 if dc.detected_cluster_id and \
                         dc.detected_cluster_id != sds_details.get(
                             'detected_cluster_id'):
 
                     # Gluster peer list has changed
                     integration_id = \
                         NS.tendrl_context.integration_id
                     etcd_utils.write(
                         integration_index_key,
                         integration_id
                     )
-                    # Set the cluster status as new peers detected
                     _cluster = NS.tendrl.objects.Cluster(
                         integration_id=integration_id
                     ).load()
-                    _cluster.status = "new_peers_detected"
-                    _cluster.save()
-                    # Raise an alert about the new peers
-                    msg = "New peers identified in cluster: %s. " \
-                          "Make sure tendrl-ansible is executed " \
-                          "for the new nodes so that the expand " \
-                          "cluster option can be triggered" % \
-                          _cluster.short_name
-                    event_utils.emit_event(
-                        "cluster_status",
-                        "new_peers_detected",
-                        msg,
-                        "cluster_{0}".format(integration_id),
-                        "WARNING",
-                        integration_id=integration_id
-                    )
+                    if _cluster.is_managed == "yes":
+                        # Set the cluster status as new peers detected
+                        _cluster.status = "new_peers_detected"
+                        _cluster.save()
+                        # Raise an alert about the new peers
+                        msg = "New peers identified in cluster: %s. " \
+                              "Make sure tendrl-ansible is executed " \
+                              "for the new nodes so that the expand " \
+                              "cluster option can be triggered" % \
+                              _cluster.short_name
+                        event_utils.emit_event(
+                            "cluster_status",
+                            "new_peers_detected",
+                            msg,
+                            "cluster_{0}".format(integration_id),
+                            "WARNING",
+                            integration_id=integration_id
+                        )
         _cluster = NS.tendrl.objects.Cluster(
             integration_id=NS.tendrl_context.integration_id
         ).load()
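The behavioural change in this hunk is the is_managed gate: the status flip and the WARNING event now fire only for managed clusters. A rough sketch of the resulting control flow, with hypothetical stand-ins for the cluster object and the event emitter (neither is the module's actual API):

```python
def flag_new_peers(cluster, emit_event):
    # Unmanaged clusters skip both the status change and the alert,
    # so clusters that are not yet imported no longer emit WARNINGs
    if cluster.is_managed != "yes":
        return
    cluster.status = "new_peers_detected"
    cluster.save()
    emit_event("cluster_status", "new_peers_detected",
               "New peers identified in cluster: %s" % cluster.short_name)
```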
@@ -192,6 +192,17 @@ def sync(sync_ttl, node_status_ttl):
                         if integration_id:
                             break
                     except etcd.EtcdKeyNotFound:
+                        if NS.tendrl_context.integration_id:
+                            # If the node already has an integration_id
+                            # and the provisioner tag is not claimed by
+                            # anyone, return so it can be claimed back
+                            try:
+                                etcd_utils.read(
+                                    "indexes/tags/provisioner/%s" %
+                                    NS.tendrl_context.integration_id
+                                )
+                            except etcd.EtcdKeyNotFound:
+                                return
                         loop_count += 1
                         continue
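The early return added here probes etcd for the provisioner index key: if the node already has an integration_id but nobody currently holds the provisioner tag, sync bails out so the claim logic can run again on the next pass. A sketch of that probe, assuming a python-etcd style client whose read() raises etcd.EtcdKeyNotFound for missing keys:

```python
import etcd


def provisioner_unclaimed(client, integration_id):
    key = "indexes/tags/provisioner/%s" % integration_id
    try:
        client.read(key)
        return False  # some node already holds the provisioner tag
    except etcd.EtcdKeyNotFound:
        return True   # unclaimed: abort this sync pass and re-claim


# e.g. provisioner_unclaimed(etcd.Client(host="127.0.0.1"), "abc-123")
```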

@@ -212,6 +223,32 @@ def sync(sync_ttl, node_status_ttl):
                         detected_cluster_tag = "detected_cluster/%s" % \
                             sds_details[
                                 'detected_cluster_id']
+                        # The detected cluster id changes when a new node
+                        # joins the peer list or a peer detach happens;
+                        # node_context should not keep multiple DC ids
+                        old_dc_id = "detected_cluster/%s" % dc.detected_cluster_id
+                        if old_dc_id in NS.node_context.tags and \
+                                old_dc_id != detected_cluster_tag:
+                            NS.node_context.tags.remove(old_dc_id)
+                            # Remove the old detected cluster id from indexes
+                            indexes_keys = []
+                            indexes_keys.append(
+                                "indexes/detected_cluster_id_to_integration_id/%s" %
+                                dc.detected_cluster_id
+                            )
+                            indexes_keys.append(
+                                "indexes/tags/detected_cluster/%s" %
+                                dc.detected_cluster_id
+                            )
+                            for indexes_key in indexes_keys:
+                                try:
+                                    etcd_utils.delete(
+                                        indexes_key
+                                    )
+                                except etcd.EtcdKeyNotFound:
+                                    # It may have been removed by another
+                                    # node in the same cluster
+                                    pass
                         NS.node_context.tags += [detected_cluster_tag,
                                                  integration_tag]
                         NS.node_context.tags = list(set(NS.node_context.tags))
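Since both peer additions and peer detaches change the derived id, the cleanup above must tolerate races with other peers doing the same cleanup at the same time. A sketch of that idempotent delete, assuming the same python-etcd style client as before; the function name is illustrative:

```python
import etcd


def drop_stale_indexes(client, old_detected_cluster_id):
    for key in (
        "indexes/detected_cluster_id_to_integration_id/%s"
        % old_detected_cluster_id,
        "indexes/tags/detected_cluster/%s" % old_detected_cluster_id,
    ):
        try:
            client.delete(key)
        except etcd.EtcdKeyNotFound:
            pass  # another peer already removed it; that is fine
```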
@@ -245,6 +282,34 @@ def sync(sync_ttl, node_status_ttl):
                                 )
                             )
                         break
+                else:
+                    # If no detected cluster id is present, the node was
+                    # detached from its peers or glusterd is down; in
+                    # both cases remove the provisioner tag
+                    _ptag = "provisioner/%s" % \
+                        NS.tendrl_context.integration_id
+                    if _ptag in NS.node_context.tags:
+                        NS.node_context.tags.remove(_ptag)
+                        _index_key = "/indexes/tags/%s" % _ptag
+                        try:
+                            etcd_utils.delete(_index_key)
+                        except etcd.EtcdKeyNotFound:
+                            pass
+                    dc = NS.tendrl.objects.DetectedCluster().load()
+                    dc_id = "detected_cluster/%s" % dc.detected_cluster_id
+                    if dc_id in NS.node_context.tags:
+                        # Remove the detected cluster id tag so that the
+                        # provisioner tag is assigned only to nodes that
+                        # have a detected cluster id
+                        NS.node_context.tags.remove(dc_id)
+                    if NS.tendrl_context.integration_id:
+                        integration_tag = "tendrl/integration/%s" % \
+                            NS.tendrl_context.integration_id
+                        if integration_tag in NS.node_context.tags:
+                            # A glusterd-down or peer-detached node should
+                            # not pick up cluster-wide parent jobs
+                            NS.node_context.tags.remove(integration_tag)
+                    NS.node_context.save()
     except Exception as ex:
         Event(
             ExceptionMessage(
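The new else-branch strips three tags when the node has no detected cluster id (peer detached, or glusterd down). A compact sketch of just that tag hygiene, operating on a plain list the way NS.node_context.tags is used above; the function name is a stand-in:

```python
def strip_cluster_tags(tags, integration_id, detected_cluster_id):
    # Without these tags the node stops claiming the provisioner role
    # and stops picking up cluster-wide parent jobs
    for tag in ("provisioner/%s" % integration_id,
                "detected_cluster/%s" % detected_cluster_id,
                "tendrl/integration/%s" % integration_id):
        if tag in tags:
            tags.remove(tag)
    return tags
```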
7 changes: 5 additions & 2 deletions tendrl/node_agent/node_sync/services_and_index_sync.py
@@ -60,7 +60,10 @@ def sync(sync_ttl=None):
                 ).load()
                 if _cnc.is_managed == "yes":
                     _cnc_is_managed = True
-        if _cluster.is_managed in [None, '', 'no']:
+        dc = NS.tendrl.objects.DetectedCluster().load()
+        dc_tag = "detected_cluster/%s" % dc.detected_cluster_id
+        if _cluster.is_managed in [None, '', 'no'] and \
+                dc_tag in NS.node_context.tags:
             if _tag not in NS.node_context.tags:
                 try:
                     _index_key = "/indexes/tags/%s" % _tag
@@ -74,7 +77,7 @@ def sync(sync_ttl=None):
                     _is_new_provisioner = True
                 except etcd.EtcdAlreadyExist:
                     pass
-        else:
+        elif dc_tag in NS.node_context.tags:
             if _tag not in NS.node_context.tags and _cnc_is_managed:
                 try:
                     _index_key = "/indexes/tags/%s" % _tag
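Taken together, the two small hunks in services_and_index_sync.py make the detected_cluster/&lt;id&gt; tag a precondition for claiming the provisioner role in either branch. A condensed, illustrative decision function for that gate (parameter names are stand-ins, not the module's API):

```python
def may_claim_provisioner(cluster_is_managed, node_tags, dc_tag,
                          cnc_is_managed):
    if dc_tag not in node_tags:
        # No detected cluster id: glusterd is down or the node was
        # detached, so it must not become the provisioner
        return False
    if cluster_is_managed in (None, "", "no"):
        return True  # unmanaged cluster: first eligible node claims
    return cnc_is_managed  # managed: only a managed node context claims
```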