@@ -556,7 +556,7 @@ def compare_config():
 
         pg_patch_config["spec"]["patroni"]["slots"][slot_to_change]["database"] = "bar"
         del pg_patch_config["spec"]["patroni"]["slots"][slot_to_remove]
-
+
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_delete_slot_patch)
 
@@ -573,7 +573,7 @@ def compare_config():
 
         self.eventuallyEqual(lambda: self.query_database(leader.metadata.name, "postgres", get_slot_query % ("database", slot_to_change))[0], "bar",
                              "The replication slot cannot be updated", 10, 5)
-
+
         # make sure slot from Patroni didn't get deleted
         self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", get_slot_query % ("slot_name", patroni_slot))), 1,
                              "The replication slot from Patroni gets deleted", 10, 5)
@@ -929,7 +929,7 @@ def test_ignored_annotations(self):
                 },
             }
         }
-
+
         old_sts_creation_timestamp = sts.metadata.creation_timestamp
         k8s.api.apps_v1.patch_namespaced_stateful_set(sts.metadata.name, sts.metadata.namespace, annotation_patch)
         old_svc_creation_timestamp = svc.metadata.creation_timestamp
@@ -1254,7 +1254,7 @@ def test_persistent_volume_claim_retention_policy(self):
         }
         k8s.update_config(patch_scaled_policy_retain)
         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
-
+
         # decrease the number of instances
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default', 'postgresqls', 'acid-minimal-cluster', pg_patch_scale_down_instances)
@@ -1622,7 +1622,7 @@ def test_password_rotation(self):
             },
         }
         k8s.api.core_v1.patch_namespaced_secret(
-            name="foo-user.acid-minimal-cluster.credentials.postgresql.acid.zalan.do", 
+            name="foo-user.acid-minimal-cluster.credentials.postgresql.acid.zalan.do",
             namespace="default",
             body=secret_fake_rotation)
 
@@ -1638,7 +1638,7 @@ def test_password_rotation(self):
             "data": {
                 "enable_password_rotation": "true",
                 "password_rotation_interval": "30",
-                "password_rotation_user_retention": "30", # should be set to 60 
+                "password_rotation_user_retention": "30", # should be set to 60
             },
         }
         k8s.update_config(enable_password_rotation)
@@ -1691,7 +1691,7 @@ def test_password_rotation(self):
                              "Unexpected username in secret of test.db_user: expected {}, got {}".format("test.db_user", secret_username))
 
         # disable password rotation for all other users (foo_user)
-        # and pick smaller intervals to see if the third fake rotation user is dropped 
+        # and pick smaller intervals to see if the third fake rotation user is dropped
         enable_password_rotation = {
             "data": {
                 "enable_password_rotation": "false",
@@ -2158,7 +2158,7 @@ def assert_distributed_pods(self, target_nodes, cluster_labels='cluster-name=aci
 
         # if nodes are different we can quit here
         if master_nodes[0] not in replica_nodes:
-            return True 
+            return True
 
         # enable pod anti-affinity in config map which should trigger movement of replica
         patch_enable_antiaffinity = {
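(The hunk boundary cuts off the body of patch_enable_antiaffinity. For readers of only this diff: the toggle referenced in the comment above is presumably the operator's documented ConfigMap flag enable_pod_antiaffinity, so the patch most likely continues along these lines; this is a reconstruction for context, not part of the change itself:

    # presumed continuation of the patch shown above (not part of this diff)
    patch_enable_antiaffinity = {
        'data': {
            'enable_pod_antiaffinity': "true"
        }
    }
)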
@@ -2182,7 +2182,7 @@ def assert_distributed_pods(self, target_nodes, cluster_labels='cluster-name=aci
             }
             k8s.update_config(patch_disable_antiaffinity, "disable antiaffinity")
             self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
-
+
             k8s.wait_for_pod_start('spilo-role=replica,' + cluster_labels)
             k8s.wait_for_running_pods(cluster_labels, 2)
 
@@ -2193,7 +2193,7 @@ def assert_distributed_pods(self, target_nodes, cluster_labels='cluster-name=aci
             # if nodes are different we can quit here
             for target_node in target_nodes:
                 if (target_node not in master_nodes or target_node not in replica_nodes) and master_nodes[0] in replica_nodes:
-                    print('Pods run on the same node') 
+                    print('Pods run on the same node')
                     return False
 
         except timeout_decorator.TimeoutError:
@@ -2272,5 +2272,74 @@ def query_database_with_user(self, pod_name, db_name, query, user_name):
 
         return result_set
 
+    def test_topology_spread_constraints(self):
+        '''
+        Enable topologySpreadConstraints for pods
+        '''
+        k8s = self.k8s
+        cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
+
+        # Verify we are in good state from potential previous tests
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
+
+        patch_node_label = {
+            "metadata": {
+                "labels": {
+                    "topology.kubernetes.io/zone": "zalando"
+                }
+            }
+        }
+
+        nodes = k8s.api.core_v1.list_node()
+        for node in nodes.items:
+            k8s.api.core_v1.patch_node(node.metadata.name, patch_node_label)
+
+        podsList = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_label)
+        k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
+        k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
+
+        patch_cordon_node = {
+            "spec": {
+                "unschedulable": True
+            }
+        }
+
+        master_nodes, replica_nodes = k8s.get_cluster_nodes()
+        self.assertNotEqual(master_nodes, [])
+        self.assertNotEqual(replica_nodes, [])
+
+        # Cordon the replica node
+        k8s.api.core_v1.patch_node(replica_nodes[0], patch_cordon_node)
+        # Delete the replica pod so it gets rescheduled onto the master node
+        replicas_pod = k8s.get_cluster_replica_pod()
+        k8s.api.core_v1.delete_namespaced_pod(replicas_pod.metadata.name, 'default')
+        # Wait for the replica pod to be rescheduled onto the master node
+        k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
+        # Assert master and replica pods now run on the same node
+        master_nodes, replica_nodes = k8s.get_cluster_nodes()
+        self.assertEqual(master_nodes[0], replica_nodes[0])
+
+        patch_uncordon_node = {
+            "spec": {
+                "unschedulable": False
+            }
+        }
+
+        # Uncordon the replica node
+        k8s.api.core_v1.patch_node(replica_nodes[0], patch_uncordon_node)
+
+        patch_enable_topology_spread_constraints = {
+            "data": {
+                "enable_postgres_topology_spread_constraints": "true"
+            }
+        }
+
+        k8s.update_config(patch_enable_topology_spread_constraints, "enable topologySpreadConstraints")
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+
+        # Assert master and replica pods are spread across two different nodes
+        master_nodes, replica_nodes = k8s.get_cluster_nodes()
+        self.assertNotEqual(master_nodes[0], replica_nodes[0])
+
 if __name__ == '__main__':
     unittest.main()
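(The new test verifies spreading only indirectly, through where the pods end up. As a possible follow-up, not part of this change, one could also read back the rendered StatefulSet and inspect the constraints the operator actually emits. A minimal sketch, assuming the StatefulSet is named acid-minimal-cluster in the default namespace and reusing the framework's k8s.api.apps_v1 client; the topologyKey and maxSkew the operator chooses are deliberately not asserted, since they are implementation details not shown in this diff:

    # Hypothetical follow-up check, appended at the end of test_topology_spread_constraints:
    # read the StatefulSet back and confirm the pod template carries spread constraints.
    sts = k8s.api.apps_v1.read_namespaced_stateful_set('acid-minimal-cluster', 'default')
    constraints = sts.spec.template.spec.topology_spread_constraints or []
    self.assertTrue(len(constraints) > 0, "Pod template has no topologySpreadConstraints")
    for c in constraints:
        # presence only; the chosen topology_key / max_skew are operator-side decisions
        print(c.topology_key, c.max_skew, c.when_unsatisfiable)
)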