Skip to content

Commit f7fb00a

Browse files
committed
Added a new test verifying that HyperShift clusters can be properly scanned for PCI-DSS compliance
1 parent 30241be commit f7fb00a

2 files changed

Lines changed: 246 additions & 8 deletions

File tree

tests/e2e/framework/common.go

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -290,7 +290,7 @@ func (f *Framework) addFrameworks() error {
290290
}
291291

292292
// MCO objects
293-
if f.Platform != "rosa" {
293+
if f.Platform != "rosa" && f.Platform != "HyperShift" {
294294
mcoObjs := [2]dynclient.ObjectList{
295295
&mcfgv1.MachineConfigPoolList{},
296296
&mcfgv1.MachineConfigList{},
@@ -600,7 +600,7 @@ func (f *Framework) GetReadyProfileBundle(name, namespace string) (*compv1alpha1
600600
}
601601

602602
func (f *Framework) updateScanSettingsForDebug() error {
603-
if f.Platform == "rosa" {
603+
if f.Platform == "rosa" || f.Platform == "HyperShift" {
604604
fmt.Printf("bypassing ScanSettings test setup because it's not supported on %s\n", f.Platform)
605605
return nil
606606
}
@@ -622,7 +622,7 @@ func (f *Framework) updateScanSettingsForDebug() error {
622622
}
623623

624624
func (f *Framework) ensureE2EScanSettings() error {
625-
if f.Platform == "rosa" {
625+
if f.Platform == "rosa" || f.Platform == "HyperShift" {
626626
fmt.Printf("bypassing ScanSettings test setup because it's not supported on %s\n", f.Platform)
627627
return nil
628628
}
@@ -652,7 +652,7 @@ func (f *Framework) ensureE2EScanSettings() error {
652652
}
653653

654654
func (f *Framework) deleteScanSettings(name string) error {
655-
if f.Platform == "rosa" {
655+
if f.Platform == "rosa" || f.Platform == "HyperShift" {
656656
fmt.Printf("bypassing ScanSettings test setup because it's not supported on %s\n", f.Platform)
657657
return nil
658658
}
@@ -670,7 +670,7 @@ func (f *Framework) deleteScanSettings(name string) error {
670670
}
671671

672672
func (f *Framework) createMachineConfigPool(n string) error {
673-
if f.Platform == "rosa" {
673+
if f.Platform == "rosa" || f.Platform == "HyperShift" {
674674
fmt.Printf("bypassing MachineConfigPool test setup because it's not supported on %s\n", f.Platform)
675675
return nil
676676
}
@@ -792,7 +792,7 @@ func (f *Framework) createMachineConfigPool(n string) error {
792792
}
793793

794794
func (f *Framework) createInvalidMachineConfigPool(n string) error {
795-
if f.Platform == "rosa" {
795+
if f.Platform == "rosa" || f.Platform == "HyperShift" {
796796
fmt.Printf("bypassing MachineConfigPool test setup because it's not supported on %s\n", f.Platform)
797797
return nil
798798
}
@@ -823,7 +823,7 @@ func (f *Framework) createInvalidMachineConfigPool(n string) error {
823823
}
824824

825825
func (f *Framework) cleanUpMachineConfigPool(n string) error {
826-
if f.Platform == "rosa" {
826+
if f.Platform == "rosa" || f.Platform == "HyperShift" {
827827
fmt.Printf("bypassing MachineConfigPool cleanup because it's not supported on %s\n", f.Platform)
828828
return nil
829829
}
@@ -841,7 +841,7 @@ func (f *Framework) cleanUpMachineConfigPool(n string) error {
841841
}
842842

843843
func (f *Framework) restoreNodeLabelsForPool(n string) error {
844-
if f.Platform == "rosa" {
844+
if f.Platform == "rosa" || f.Platform == "HyperShift" {
845845
fmt.Printf("bypassing node label restoration because MachineConfigPools are not supported on %s\n", f.Platform)
846846
return nil
847847
}

tests/e2e/serial/main_test.go

Lines changed: 238 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2164,6 +2164,244 @@ func TestScanTailoredProfileExtendsDeprecated(t *testing.T) {
21642164
}
21652165
}
21662166

2167+
// TestScanHyperShiftHostedClusterPCIDSS tests scanning HyperShift hosted clusters with PCI-DSS profiles
2168+
func TestScanHyperShiftHostedClusterPCIDSS(t *testing.T) {
2169+
f := framework.Global
2170+
2171+
// Check if running on a HyperShift hosted cluster
2172+
infra := &configv1.Infrastructure{}
2173+
err := f.Client.Get(context.TODO(), types.NamespacedName{Name: "cluster"}, infra)
2174+
if err != nil {
2175+
t.Fatalf("Failed to get cluster infrastructure: %s", err)
2176+
}
2177+
2178+
// Skip test if not running on a HyperShift (External control plane) cluster
2179+
if infra.Status.ControlPlaneTopology != configv1.ExternalTopologyMode {
2180+
t.Skip("Skipping HyperShift test - not running on a hosted cluster (External control plane)")
2181+
}
2182+
2183+
tpName := "test-hypershift-pci-dss-tailored-profile"
2184+
tp := &compv1alpha1.TailoredProfile{
2185+
ObjectMeta: metav1.ObjectMeta{
2186+
Name: tpName,
2187+
Namespace: f.OperatorNamespace,
2188+
Annotations: map[string]string{
2189+
"compliance.openshift.io/product-type": "Platform",
2190+
},
2191+
},
2192+
Spec: compv1alpha1.TailoredProfileSpec{
2193+
Extends: "ocp4-pci-dss",
2194+
Title: "PCIDSS for Hypershift",
2195+
Description: "PCIDSS for Hypershift Master-plane components",
2196+
DisableRules: []compv1alpha1.RuleReferenceSpec{
2197+
// Rules below are checked on the Management cluster in HyperShift architecture
2198+
{Name: "ocp4-api-server-admission-control-plugin-namespacelifecycle", Rationale: "This rule is being checked on the Management cluster"},
2199+
{Name: "ocp4-api-server-admission-control-plugin-noderestriction", Rationale: "This rule is being checked on the Management cluster"},
2200+
{Name: "ocp4-api-server-admission-control-plugin-scc", Rationale: "This rule is being checked on the Management cluster"},
2201+
{Name: "ocp4-api-server-admission-control-plugin-service-account", Rationale: "This rule is being checked on the Management cluster"},
2202+
{Name: "ocp4-api-server-audit-log-maxbackup", Rationale: "This rule is being checked on the Management cluster"},
2203+
{Name: "ocp4-api-server-audit-log-maxsize", Rationale: "This rule is being checked on the Management cluster"},
2204+
{Name: "ocp4-ocp-api-server-audit-log-maxbackup", Rationale: "This rule is being checked on the Management cluster"},
2205+
{Name: "ocp4-ocp-api-server-audit-log-maxsize", Rationale: "This rule is being checked on the Management cluster"},
2206+
{Name: "ocp4-api-server-audit-log-path", Rationale: "This rule is being checked on the Management cluster"},
2207+
{Name: "ocp4-api-server-auth-mode-node", Rationale: "This rule is being checked on the Management cluster"},
2208+
{Name: "ocp4-api-server-auth-mode-rbac", Rationale: "This rule is being checked on the Management cluster"},
2209+
{Name: "ocp4-api-server-bind-address", Rationale: "This rule is being checked on the Management cluster"},
2210+
{Name: "ocp4-api-server-client-ca", Rationale: "This rule is being checked on the Management cluster"},
2211+
{Name: "ocp4-api-server-encryption-provider-cipher", Rationale: "This rule is being checked on the Management cluster"},
2212+
{Name: "ocp4-api-server-etcd-ca", Rationale: "This rule is being checked on the Management cluster"},
2213+
{Name: "ocp4-api-server-etcd-cert", Rationale: "This rule is being checked on the Management cluster"},
2214+
{Name: "ocp4-api-server-etcd-key", Rationale: "This rule is being checked on the Management cluster"},
2215+
{Name: "ocp4-api-server-kubelet-certificate-authority", Rationale: "This rule is being checked on the Management cluster"},
2216+
{Name: "ocp4-api-server-kubelet-client-cert", Rationale: "This rule is being checked on the Management cluster"},
2217+
{Name: "ocp4-api-server-kubelet-client-key", Rationale: "This rule is being checked on the Management cluster"},
2218+
{Name: "ocp4-api-server-request-timeout", Rationale: "This rule is being checked on the Management cluster"},
2219+
{Name: "ocp4-api-server-service-account-lookup", Rationale: "This rule is being checked on the Management cluster"},
2220+
{Name: "ocp4-api-server-service-account-public-key", Rationale: "This rule is being checked on the Management cluster"},
2221+
{Name: "ocp4-api-server-tls-cert", Rationale: "This rule is being checked on the Management cluster"},
2222+
{Name: "ocp4-api-server-tls-cipher-suites", Rationale: "This rule is being checked on the Management cluster"},
2223+
{Name: "ocp4-api-server-tls-private-key", Rationale: "This rule is being checked on the Management cluster"},
2224+
{Name: "ocp4-audit-log-forwarding-enabled", Rationale: "This rule is being checked on the Management cluster"},
2225+
{Name: "ocp4-controller-insecure-port-disabled", Rationale: "This rule is being checked on the Management cluster"},
2226+
{Name: "ocp4-controller-secure-port", Rationale: "This rule is being checked on the Management cluster"},
2227+
{Name: "ocp4-controller-service-account-ca", Rationale: "This rule is being checked on the Management cluster"},
2228+
{Name: "ocp4-controller-service-account-private-key", Rationale: "This rule is being checked on the Management cluster"},
2229+
{Name: "ocp4-controller-use-service-account", Rationale: "This rule is being checked on the Management cluster"},
2230+
{Name: "ocp4-etcd-auto-tls", Rationale: "This rule is being checked on the Management cluster"},
2231+
{Name: "ocp4-etcd-cert-file", Rationale: "This rule is being checked on the Management cluster"},
2232+
{Name: "ocp4-etcd-client-cert-auth", Rationale: "This rule is being checked on the Management cluster"},
2233+
{Name: "ocp4-etcd-key-file", Rationale: "This rule is being checked on the Management cluster"},
2234+
{Name: "ocp4-etcd-peer-auto-tls", Rationale: "This rule is being checked on the Management cluster"},
2235+
{Name: "ocp4-etcd-peer-cert-file", Rationale: "This rule is being checked on the Management cluster"},
2236+
{Name: "ocp4-etcd-peer-client-cert-auth", Rationale: "This rule is being checked on the Management cluster"},
2237+
{Name: "ocp4-etcd-peer-key-file", Rationale: "This rule is being checked on the Management cluster"},
2238+
{Name: "ocp4-kubelet-configure-tls-key", Rationale: "This rule is being checked on the Management cluster"},
2239+
{Name: "ocp4-kubelet-configure-tls-cert", Rationale: "This rule is being checked on the Management cluster"},
2240+
{Name: "ocp4-kubelet-disable-readonly-port", Rationale: "This rule is being checked on the Management cluster"},
2241+
{Name: "ocp4-api-server-admission-control-plugin-alwaysadmit", Rationale: "This rule is being checked on the Management cluster"},
2242+
{Name: "ocp4-api-server-admission-control-plugin-alwayspullimages", Rationale: "This rule is being checked on the Management cluster"},
2243+
{Name: "ocp4-api-server-admission-control-plugin-securitycontextdeny", Rationale: "This rule is being checked on the Management cluster"},
2244+
{Name: "ocp4-api-server-anonymous-auth", Rationale: "This rule is being checked on the Management cluster"},
2245+
{Name: "ocp4-api-server-api-priority-flowschema-catch-all", Rationale: "This rule is being checked on the Management cluster"},
2246+
{Name: "ocp4-api-server-auth-mode-no-aa", Rationale: "This rule is being checked on the Management cluster"},
2247+
{Name: "ocp4-api-server-basic-auth", Rationale: "This rule is being checked on the Management cluster"},
2248+
{Name: "ocp4-api-server-https-for-kubelet-conn", Rationale: "This rule is being checked on the Management cluster"},
2249+
{Name: "ocp4-api-server-insecure-bind-address", Rationale: "This rule is being checked on the Management cluster"},
2250+
{Name: "ocp4-api-server-no-adm-ctrl-plugins-disabled", Rationale: "This rule is being checked on the Management cluster"},
2251+
{Name: "ocp4-api-server-oauth-https-serving-cert", Rationale: "This rule is being checked on the Management cluster"},
2252+
{Name: "ocp4-api-server-openshift-https-serving-cert", Rationale: "This rule is being checked on the Management cluster"},
2253+
{Name: "ocp4-api-server-profiling-protected-by-rbac", Rationale: "This rule is being checked on the Management cluster"},
2254+
{Name: "ocp4-api-server-token-auth", Rationale: "This rule is being checked on the Management cluster"},
2255+
{Name: "ocp4-openshift-api-server-audit-log-path", Rationale: "This rule is being checked on the Management cluster"},
2256+
{Name: "ocp4-scheduler-no-bind-address", Rationale: "This rule is being checked on the Management cluster"},
2257+
{Name: "ocp4-tls-version-check-apiserver", Rationale: "This rule is being checked on the Management cluster"},
2258+
{Name: "ocp4-etcd-check-cipher-suite", Rationale: "This rule is being checked on the Management cluster"},
2259+
},
2260+
},
2261+
}
2262+
createTPErr := f.Client.Create(context.TODO(), tp, nil)
2263+
if createTPErr != nil {
2264+
t.Fatalf("failed to create TailoredProfile %s: %s", tp.Name, createTPErr)
2265+
}
2266+
defer f.Client.Delete(context.TODO(), tp)
2267+
2268+
// Wait for TailoredProfile to be ready
2269+
err = f.WaitForTailoredProfileStatus(f.OperatorNamespace, tpName, compv1alpha1.TailoredProfileStateReady)
2270+
if err != nil {
2271+
t.Fatalf("failed waiting for TailoredProfile to be ready: %s", err)
2272+
}
2273+
2274+
suiteName := framework.GetObjNameFromTest(t)
2275+
ssb := &compv1alpha1.ScanSettingBinding{
2276+
ObjectMeta: metav1.ObjectMeta{
2277+
Name: suiteName,
2278+
Namespace: f.OperatorNamespace,
2279+
},
2280+
Profiles: []compv1alpha1.NamedObjectReference{
2281+
{
2282+
APIGroup: "compliance.openshift.io/v1alpha1",
2283+
Kind: "TailoredProfile",
2284+
Name: tpName,
2285+
},
2286+
{
2287+
APIGroup: "compliance.openshift.io/v1alpha1",
2288+
Kind: "Profile",
2289+
Name: "ocp4-pci-dss-node",
2290+
},
2291+
},
2292+
SettingsRef: &compv1alpha1.NamedObjectReference{
2293+
APIGroup: "compliance.openshift.io/v1alpha1",
2294+
Kind: "ScanSetting",
2295+
Name: "default",
2296+
},
2297+
}
2298+
err = f.Client.Create(context.TODO(), ssb, nil)
2299+
if err != nil {
2300+
t.Fatalf("failed to create ScanSettingBinding: %s", err)
2301+
}
2302+
defer f.Client.Delete(context.TODO(), ssb)
2303+
2304+
// Wait for the suite to complete
2305+
err = f.WaitForSuiteScansStatus(f.OperatorNamespace, suiteName, compv1alpha1.PhaseDone, compv1alpha1.ResultNonCompliant)
2306+
if err != nil {
2307+
t.Fatalf("failed waiting for suite to complete: %s", err)
2308+
}
2309+
2310+
// Get the ComplianceSuite to verify it completed
2311+
suite := &compv1alpha1.ComplianceSuite{}
2312+
err = f.Client.Get(context.TODO(), types.NamespacedName{Name: suiteName, Namespace: f.OperatorNamespace}, suite)
2313+
if err != nil {
2314+
t.Fatalf("failed to get ComplianceSuite: %s", err)
2315+
}
2316+
2317+
if suite.Status.Phase != compv1alpha1.PhaseDone {
2318+
t.Fatalf("Expected suite phase to be DONE, got %s", suite.Status.Phase)
2319+
}
2320+
2321+
if suite.Status.Result != compv1alpha1.ResultNonCompliant {
2322+
t.Logf("Warning: Expected suite result to be NON-COMPLIANT, got %s", suite.Status.Result)
2323+
}
2324+
2325+
// Verify specific ComplianceCheckResults
2326+
// Check for configure-network-policies-namespaces rule (expected to FAIL on most clusters)
2327+
checkConfigureNetworkPolicies := compv1alpha1.ComplianceCheckResult{
2328+
ObjectMeta: metav1.ObjectMeta{
2329+
Name: fmt.Sprintf("%s-configure-network-policies-namespaces", tpName),
2330+
Namespace: f.OperatorNamespace,
2331+
},
2332+
}
2333+
err = f.Client.Get(context.TODO(), types.NamespacedName{
2334+
Name: checkConfigureNetworkPolicies.Name,
2335+
Namespace: f.OperatorNamespace,
2336+
}, &checkConfigureNetworkPolicies)
2337+
if err != nil {
2338+
t.Logf("Warning: Could not get check result for configure-network-policies-namespaces: %s", err)
2339+
} else {
2340+
t.Logf("ComplianceCheckResult %s status: %s", checkConfigureNetworkPolicies.Name, checkConfigureNetworkPolicies.Status)
2341+
if checkConfigureNetworkPolicies.Status != compv1alpha1.CheckResultFail {
2342+
t.Logf("Warning: Expected configure-network-policies-namespaces to FAIL, got %s", checkConfigureNetworkPolicies.Status)
2343+
}
2344+
}
2345+
2346+
// Check for kubeadmin-removed rule (status depends on whether kubeadmin secret exists)
2347+
kubeadminSecret := &corev1.Secret{}
2348+
kubeadminSecretErr := f.Client.Get(context.TODO(), types.NamespacedName{
2349+
Name: "kubeadmin",
2350+
Namespace: "kube-system",
2351+
}, kubeadminSecret)
2352+
2353+
checkKubeadminRemoved := compv1alpha1.ComplianceCheckResult{
2354+
ObjectMeta: metav1.ObjectMeta{
2355+
Name: fmt.Sprintf("%s-kubeadmin-removed", tpName),
2356+
Namespace: f.OperatorNamespace,
2357+
},
2358+
}
2359+
err = f.Client.Get(context.TODO(), types.NamespacedName{
2360+
Name: checkKubeadminRemoved.Name,
2361+
Namespace: f.OperatorNamespace,
2362+
}, &checkKubeadminRemoved)
2363+
if err != nil {
2364+
t.Logf("Warning: Could not get check result for kubeadmin-removed: %s", err)
2365+
} else {
2366+
t.Logf("ComplianceCheckResult %s status: %s", checkKubeadminRemoved.Name, checkKubeadminRemoved.Status)
2367+
// If kubeadmin secret doesn't exist, the check should PASS
2368+
// If kubeadmin secret exists, the check should FAIL
2369+
if kubeadminSecretErr != nil {
2370+
// Secret doesn't exist, check should PASS
2371+
if checkKubeadminRemoved.Status != compv1alpha1.CheckResultPass {
2372+
t.Logf("Warning: Expected kubeadmin-removed to PASS (secret not found), got %s", checkKubeadminRemoved.Status)
2373+
}
2374+
} else {
2375+
// Secret exists, check should FAIL
2376+
if checkKubeadminRemoved.Status != compv1alpha1.CheckResultFail {
2377+
t.Logf("Warning: Expected kubeadmin-removed to FAIL (secret exists), got %s", checkKubeadminRemoved.Status)
2378+
}
2379+
}
2380+
}
2381+
2382+
// Verify that ComplianceCheckResults exist for both scans
2383+
scanNames := []string{
2384+
tpName, // TailoredProfile scan
2385+
"ocp4-pci-dss-node", // Profile scan
2386+
}
2387+
2388+
for _, scanName := range scanNames {
2389+
checkResultList := &compv1alpha1.ComplianceCheckResultList{}
2390+
err = f.Client.List(context.TODO(), checkResultList, client.InNamespace(f.OperatorNamespace), client.MatchingLabels{
2391+
compv1alpha1.ComplianceScanLabel: scanName,
2392+
})
2393+
if err != nil {
2394+
t.Fatalf("failed to list ComplianceCheckResults for scan %s: %s", scanName, err)
2395+
}
2396+
2397+
if len(checkResultList.Items) == 0 {
2398+
t.Logf("Warning: No ComplianceCheckResults found for scan %s", scanName)
2399+
} else {
2400+
t.Logf("Found %d ComplianceCheckResults for scan %s", len(checkResultList.Items), scanName)
2401+
}
2402+
}
2403+
}
2404+
21672405
//testExecution{
21682406
// Name: "TestNodeSchedulingErrorFailsTheScan",
21692407
// IsParallel: false,

0 commit comments

Comments
 (0)