From f702abbdbfa5bb7b39e934e1454912435f99b3fe Mon Sep 17 00:00:00 2001 From: dawang Date: Fri, 6 Feb 2026 17:51:48 +0800 Subject: [PATCH 1/2] HYPERFLEET-590 - test: automate Clusters Resource Type - Workflow Validation case --- e2e/cluster/creation.go | 222 ++++++++++++------ pkg/helper/helper.go | 64 +++-- test-design/testcases/cluster.md | 57 +++-- .../{gcp.json => cluster-request.json} | 2 +- 4 files changed, 229 insertions(+), 116 deletions(-) rename testdata/payloads/clusters/{gcp.json => cluster-request.json} (94%) diff --git a/e2e/cluster/creation.go b/e2e/cluster/creation.go index 73cf879..0816301 100644 --- a/e2e/cluster/creation.go +++ b/e2e/cluster/creation.go @@ -1,88 +1,154 @@ package cluster import ( - "context" + "context" - "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" //nolint:staticcheck // dot import for test readability + "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" //nolint:staticcheck // dot import for test readability - "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/helper" - "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/labels" + "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/client" + "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/helper" + "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/labels" ) -var lifecycleTestName = "[Suite: cluster][baseline] Full Cluster Creation Flow on GCP" +var lifecycleTestName = "[Suite: cluster][baseline] Clusters Resource Type - Workflow Validation" var _ = ginkgo.Describe(lifecycleTestName, - ginkgo.Label(labels.Tier0), - func() { - var h *helper.Helper - var clusterID string - - ginkgo.BeforeEach(func() { - h = helper.New() - }) - - ginkgo.It("should create GCP cluster and transition to Ready state with all adapters healthy", func(ctx context.Context) { - ginkgo.By("submitting cluster creation request via POST /api/hyperfleet/v1/clusters") - cluster, err := h.Client.CreateClusterFromPayload(ctx, "testdata/payloads/clusters/gcp.json") - Expect(err).NotTo(HaveOccurred(), "failed to create cluster") - - ginkgo.By("verifying API response (HTTP 201 Created)") - Expect(cluster.Id).NotTo(BeNil(), "cluster ID should be generated") - clusterID = *cluster.Id - ginkgo.GinkgoWriter.Printf("Created cluster ID: %s\n", clusterID) - - Expect(cluster.Status).NotTo(BeNil(), "cluster status should be present") - /** - Expect(cluster.Status.Phase).To(Equal(openapi.NotReady), "cluster should be in NotReady phase initially") - - Cluster final status depends on all deployed adapter result, this is still in progress. - Will update this part once adapter scope is finalized. 
- ginkgo.By("monitoring cluster status - waiting for phase transition to Ready") - err = h.WaitForClusterPhase(ctx, clusterID, openapi.Ready, h.Cfg.Timeouts.Cluster.Ready) - Expect(err).NotTo(HaveOccurred(), "cluster should reach Ready phase") - - ginkgo.By("verifying all adapter conditions via /clusters/{id}/statuses endpoint") - const expectedAdapterCount = 1 // GCP cluster expects 1 adapter - Eventually(func(g Gomega) { - statuses, err := h.Client.GetClusterStatuses(ctx, clusterID) - g.Expect(err).NotTo(HaveOccurred(), "failed to get cluster statuses") - g.Expect(statuses.Items).To(HaveLen(expectedAdapterCount), - "expected %d adapter(s), got %d", expectedAdapterCount, len(statuses.Items)) - - for _, adapter := range statuses.Items { - hasApplied := h.HasCondition(adapter.Conditions, client.ConditionTypeApplied, openapi.True) - g.Expect(hasApplied).To(BeTrue(), - "adapter %s should have Applied=True", adapter.Adapter) - - hasAvailable := h.HasCondition(adapter.Conditions, client.ConditionTypeAvailable, openapi.True) - g.Expect(hasAvailable).To(BeTrue(), - "adapter %s should have Available=True", adapter.Adapter) - - hasHealth := h.HasCondition(adapter.Conditions, client.ConditionTypeHealth, openapi.True) - g.Expect(hasHealth).To(BeTrue(), - "adapter %s should have Health=True", adapter.Adapter) - } - }, h.Cfg.Timeouts.Adapter.Processing, h.Cfg.Polling.Interval).Should(Succeed()) - - ginkgo.By("verifying final cluster state") - finalCluster, err := h.Client.GetCluster(ctx, clusterID) - Expect(err).NotTo(HaveOccurred(), "failed to get final cluster state") - Expect(finalCluster.Status).NotTo(BeNil(), "cluster status should be present") - Expect(finalCluster.Status.Phase).To(Equal(openapi.Ready), "cluster phase should be Ready") - **/ - }) - - ginkgo.AfterEach(func(ctx context.Context) { - // Skip cleanup if helper not initialized or no cluster created - if h == nil || clusterID == "" { - return - } - - ginkgo.By("cleaning up cluster " + clusterID) - if err := h.CleanupTestCluster(ctx, clusterID); err != nil { - ginkgo.GinkgoWriter.Printf("Warning: failed to cleanup cluster %s: %v\n", clusterID, err) - } - }) - }, + ginkgo.Label(labels.Tier0), + func() { + var h *helper.Helper + var clusterID string + + ginkgo.BeforeEach(func() { + h = helper.New() + }) + + // This test validates the end-to-end cluster lifecycle workflow: + // 1. Cluster creation via API with initial condition validation + // 2. Workflow processing and Ready condition transition + // 3. Adapter execution with comprehensive metadata validation + // 4. 
Final cluster state verification + ginkgo.It("should validate complete workflow for clusters resource type from creation to Ready state", + func(ctx context.Context) { + ginkgo.By("Submit a \"clusters\" resource type request via API") + cluster, err := h.Client.CreateClusterFromPayload(ctx, "testdata/payloads/clusters/cluster-request.json") + Expect(err).NotTo(HaveOccurred(), "failed to create cluster") + Expect(cluster.Id).NotTo(BeNil(), "cluster ID should be generated") + clusterID = *cluster.Id + ginkgo.GinkgoWriter.Printf("Created cluster ID: %s\n", clusterID) + + Expect(cluster.Status).NotTo(BeNil(), "cluster status should be present") + + ginkgo.By("Verify initial status of cluster") + // Verify initial conditions are False, indicating workflow has not completed yet + // This ensures the cluster starts in the correct initial state + hasReadyFalse := h.HasResourceCondition(cluster.Status.Conditions, + client.ConditionTypeReady, openapi.ResourceConditionStatusFalse) + Expect(hasReadyFalse).To(BeTrue(), + "initial cluster conditions should have Ready=False") + + hasAvailableFalse := h.HasResourceCondition(cluster.Status.Conditions, + "Available", openapi.ResourceConditionStatusFalse) + Expect(hasAvailableFalse).To(BeTrue(), + "initial cluster conditions should have Available=False") + + ginkgo.By("Monitor cluster workflow processing") + err = h.WaitForClusterCondition( + ctx, + clusterID, + client.ConditionTypeReady, + openapi.ResourceConditionStatusTrue, + h.Cfg.Timeouts.Cluster.Ready, + ) + Expect(err).NotTo(HaveOccurred(), "cluster Ready condition should transition to True") + + ginkgo.By("Verify adapter execution results") + // Validate all adapters that executed have completed successfully + // The cluster Ready condition ensures all required adapters have finished + Eventually(func(g Gomega) { + statuses, err := h.Client.GetClusterStatuses(ctx, clusterID) + g.Expect(err).NotTo(HaveOccurred(), "failed to get cluster statuses") + g.Expect(statuses.Items).NotTo(BeEmpty(), "at least one adapter should have executed") + + // Validate each adapter has required conditions with correct status + for _, adapter := range statuses.Items { + // Validate adapter-level metadata + g.Expect(adapter.CreatedTime).NotTo(BeZero(), + "adapter %s should have valid created_time", adapter.Adapter) + g.Expect(adapter.LastReportTime).NotTo(BeZero(), + "adapter %s should have valid last_report_time", adapter.Adapter) + g.Expect(adapter.ObservedGeneration).To(Equal(int32(1)), + "adapter %s should have observed_generation=1 for new creation request", adapter.Adapter) + + hasApplied := h.HasAdapterCondition( + adapter.Conditions, + client.ConditionTypeApplied, + openapi.AdapterConditionStatusTrue, + ) + g.Expect(hasApplied).To(BeTrue(), + "adapter %s should have Applied=True", adapter.Adapter) + + hasAvailable := h.HasAdapterCondition( + adapter.Conditions, + client.ConditionTypeAvailable, + openapi.AdapterConditionStatusTrue, + ) + g.Expect(hasAvailable).To(BeTrue(), + "adapter %s should have Available=True", adapter.Adapter) + + hasHealth := h.HasAdapterCondition( + adapter.Conditions, + client.ConditionTypeHealth, + openapi.AdapterConditionStatusTrue, + ) + g.Expect(hasHealth).To(BeTrue(), + "adapter %s should have Health=True", adapter.Adapter) + + // Validate condition metadata for each condition + for _, condition := range adapter.Conditions { + g.Expect(condition.Reason).NotTo(BeNil(), + "adapter %s condition %s should have non-nil reason", adapter.Adapter, condition.Type) + 
g.Expect(*condition.Reason).NotTo(BeEmpty(), + "adapter %s condition %s should have non-empty reason", adapter.Adapter, condition.Type) + + g.Expect(condition.Message).NotTo(BeNil(), + "adapter %s condition %s should have non-nil message", adapter.Adapter, condition.Type) + g.Expect(*condition.Message).NotTo(BeEmpty(), + "adapter %s condition %s should have non-empty message", adapter.Adapter, condition.Type) + + g.Expect(condition.LastTransitionTime).NotTo(BeZero(), + "adapter %s condition %s should have valid last_transition_time", adapter.Adapter, condition.Type) + } + } + }, h.Cfg.Timeouts.Adapter.Processing, h.Cfg.Polling.Interval).Should(Succeed()) + + ginkgo.By("Verify final cluster state") + // Retrieve the final cluster state and confirm both Ready and Available conditions are True + // This confirms the cluster has reached the desired end state + finalCluster, err := h.Client.GetCluster(ctx, clusterID) + Expect(err).NotTo(HaveOccurred(), "failed to get final cluster state") + Expect(finalCluster.Status).NotTo(BeNil(), "cluster status should be present") + + hasReady := h.HasResourceCondition(finalCluster.Status.Conditions, + client.ConditionTypeReady, openapi.ResourceConditionStatusTrue) + Expect(hasReady).To(BeTrue(), "cluster should have Ready=True condition") + + hasAvailable := h.HasResourceCondition(finalCluster.Status.Conditions, + "Available", openapi.ResourceConditionStatusTrue) + Expect(hasAvailable).To(BeTrue(), "cluster should have Available=True condition") + }) + + ginkgo.AfterEach(func(ctx context.Context) { + // Skip cleanup if helper not initialized or no cluster created + if h == nil || clusterID == "" { + return + } + + ginkgo.By("cleaning up cluster " + clusterID) + if err := h.CleanupTestCluster(ctx, clusterID); err != nil { + ginkgo.GinkgoWriter.Printf("Warning: failed to cleanup cluster %s: %v\n", clusterID, err) + } + }) + }, ) diff --git a/pkg/helper/helper.go b/pkg/helper/helper.go index 6719151..2d75951 100644 --- a/pkg/helper/helper.go +++ b/pkg/helper/helper.go @@ -1,46 +1,68 @@ package helper import ( - "context" - "fmt" + "context" + "fmt" + "os/exec" + "time" - "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/api/openapi" - "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/client" - "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/config" + "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/client" + "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/config" + "github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/logger" ) // Helper provides utility functions for e2e tests type Helper struct { - Cfg *config.Config - Client *client.HyperFleetClient + Cfg *config.Config + Client *client.HyperFleetClient } // GetTestCluster creates a new temporary test cluster func (h *Helper) GetTestCluster(ctx context.Context, payloadPath string) (string, error) { - cluster, err := h.Client.CreateClusterFromPayload(ctx, payloadPath) - if err != nil { - return "", err - } - if cluster == nil { - return "", fmt.Errorf("CreateClusterFromPayload returned nil") - } - if cluster.Id == nil { - return "", fmt.Errorf("created cluster has no ID") - } - return *cluster.Id, nil + cluster, err := h.Client.CreateClusterFromPayload(ctx, payloadPath) + if err != nil { + return "", err + } + if cluster == nil { + return "", fmt.Errorf("CreateClusterFromPayload returned nil") + } + if cluster.Id == nil { + return "", fmt.Errorf("created cluster has no ID") + } + return *cluster.Id, nil } // CleanupTestCluster 
deletes the temporary test cluster +// TODO: Replace this workaround with API DELETE once HyperFleet API supports +// DELETE operations for clusters resource type: +// return h.Client.DeleteCluster(ctx, clusterID) +// Current workaround: Delete the Kubernetes namespace using kubectl func (h *Helper) CleanupTestCluster(ctx context.Context, clusterID string) error { - return h.Client.DeleteCluster(ctx, clusterID) + logger.Info("deleting cluster namespace (workaround)", "cluster_id", clusterID, "namespace", clusterID) + + // Create context with timeout for kubectl command + cmdCtx, cancel := context.WithTimeout(ctx, 2*time.Minute) + defer cancel() + + // Execute kubectl delete namespace command + cmd := exec.CommandContext(cmdCtx, "kubectl", "delete", "namespace", clusterID) + output, err := cmd.CombinedOutput() + if err != nil { + logger.Error("failed to delete cluster namespace", "cluster_id", clusterID, "error", err, "output", string(output)) + return fmt.Errorf("failed to delete namespace %s: %w (output: %s)", clusterID, err, string(output)) + } + + logger.Info("successfully deleted cluster namespace", "cluster_id", clusterID, "output", string(output)) + return nil } // GetTestNodePool creates a nodepool on the specified cluster from a payload file func (h *Helper) GetTestNodePool(ctx context.Context, clusterID, payloadPath string) (*openapi.NodePool, error) { - return h.Client.CreateNodePoolFromPayload(ctx, clusterID, payloadPath) + return h.Client.CreateNodePoolFromPayload(ctx, clusterID, payloadPath) } // CleanupTestNodePool cleans up test nodepool func (h *Helper) CleanupTestNodePool(ctx context.Context, clusterID, nodepoolID string) error { - return h.Client.DeleteNodePool(ctx, clusterID, nodepoolID) + return h.Client.DeleteNodePool(ctx, clusterID, nodepoolID) } diff --git a/test-design/testcases/cluster.md b/test-design/testcases/cluster.md index 4e347cd..9575d46 100644 --- a/test-design/testcases/cluster.md +++ b/test-design/testcases/cluster.md @@ -39,6 +39,7 @@ This test validates that the workflow can work correctly for clusters resource t ### Test Steps #### Step 1: Submit a "clusters" resource type request via API + **Action:** - Submit a POST request for "clusters" resource type: ```bash @@ -51,7 +52,30 @@ curl -X POST ${API_URL}/api/hyperfleet/v1/clusters \ - Response includes the created cluster ID and initial metadata - Initial cluster conditions have `status: False` for both condition `{"type": "Ready"}` and `{"type": "Available"}` -#### Step 2: Verify adapter status +#### Step 2: Verify initial status of cluster +**Action:** +- Poll cluster status for initial response +```bash +curl -X GET ${API_URL}/api/hyperfleet/v1/clusters/{cluster_id} +``` + +**Expected Result:** +- Cluster `Ready` condition `status: False` +- Cluster `Available` condition `status: False` + +#### Step 3: Monitor cluster workflow processing + +**Action:** +- Poll cluster status to monitor workflow processing: +```bash +curl -X GET ${API_URL}/api/hyperfleet/v1/clusters/{cluster_id} +``` + +**Expected Result:** +- Cluster `Ready` condition transitions from `status: False` to `status: True` +- This indicates the workflow has processed the cluster request and configured adapters are executing + +#### Step 4: Verify adapter execution results **Action:** - Retrieve adapter statuses information: @@ -61,29 +85,30 @@ curl -X GET ${API_URL}/api/hyperfleet/v1/clusters/{cluster_id}/statuses **Expected Result:** - Response returns HTTP 200 (OK) status code -- Adapter status payload contains the following: - 
-**Condition Types:** -- All required condition types are present: `Applied`, `Available`, `Health` -- Each condition has `status: "True"` when successful -- `reason`: Human-readable summary of the condition state -- `message`: Detailed human-readable description -- `created_time`: Timestamp when the condition was first created -- `last_transition_time`: Timestamp of the last status change -- `last_updated_time`: Timestamp of the most recent update -- `observed_generation`: Set to `1` for the initial cluster generation - -#### Step 3: Verify cluster final status +- Each adapter has all required condition types: `Applied`, `Available`, `Health` +- Each condition has `status: "True"` indicating successful execution +- **Adapter condition metadata validation** (for each condition in adapter.conditions): + - `reason`: Non-empty string providing human-readable summary of the condition state + - `message`: Non-empty string with detailed human-readable description + - `last_transition_time`: Valid RFC3339 timestamp of the last status change +- **Adapter status metadata validation** (for each adapter): + - `created_time`: Valid RFC3339 timestamp when the adapter status was first created + - `last_report_time`: Valid RFC3339 timestamp when the adapter last reported its status + - `observed_generation`: Non-nil integer value equal to 1 for new creation requests + +#### Step 5: Verify final cluster state **Action:** -- Retrieve cluster status information: +- Retrieve final cluster status information: ```bash curl -X GET ${API_URL}/api/hyperfleet/v1/clusters/{cluster_id} ``` + **Expected Result:** - Final cluster conditions have `status: True` for both condition `{"type": "Ready"}` and `{"type": "Available"}` +- This confirms the cluster has reached the desired end state -#### Step 4: Cleanup resources +#### Step 5: Cleanup resources **Action:** - Delete the namespace created for this cluster: diff --git a/testdata/payloads/clusters/gcp.json b/testdata/payloads/clusters/cluster-request.json similarity index 94% rename from testdata/payloads/clusters/gcp.json rename to testdata/payloads/clusters/cluster-request.json index 51f96a5..7a33d4d 100644 --- a/testdata/payloads/clusters/gcp.json +++ b/testdata/payloads/clusters/cluster-request.json @@ -1,6 +1,6 @@ { "kind": "Cluster", - "name": "hp-gcp-cluster-{{.Random}}", + "name": "hp-cluster-{{.Random}}", "labels": { "environment": "production", "team": "platform" From 855c70d706ad218a89d5cc9dbb05717120f3fe46 Mon Sep 17 00:00:00 2001 From: dawang Date: Tue, 10 Feb 2026 10:42:13 +0800 Subject: [PATCH 2/2] HYPERFLEET-590 - test: add the input configs to define the required adapters for clusters and nodepools and refine with review comments --- configs/config.yaml | 21 + e2e/cluster/creation.go | 59 ++- pkg/config/config.go | 467 +++++++++--------- pkg/config/defaults.go | 67 ++- test-design/testcases/cluster.md | 53 +- test-design/testcases/nodepool.md | 8 +- .../payloads/clusters/cluster-request.json | 1 + 7 files changed, 377 insertions(+), 299 deletions(-) diff --git a/configs/config.yaml b/configs/config.yaml index b992697..0ebdd3a 100644 --- a/configs/config.yaml +++ b/configs/config.yaml @@ -85,3 +85,24 @@ log: # Log output: stdout, stderr # Can be overridden by: HYPERFLEET_LOG_OUTPUT output: stdout + +# ============================================================================ +# Adapter Configuration +# ============================================================================ + +adapters: + # Required adapters for cluster resources + # List of 
adapter names that must be present and have correct conditions + # when validating cluster adapter execution + # + # Can be overridden by: HYPERFLEET_ADAPTERS_CLUSTER (comma-separated) + cluster: + - "cl-namespace" + + # Required adapters for nodepool resources + # List of adapter names that must be present and have correct conditions + # when validating nodepool adapter execution + # + # Can be overridden by: HYPERFLEET_ADAPTERS_NODEPOOL (comma-separated) + nodepool: + - "np-configmap" diff --git a/e2e/cluster/creation.go b/e2e/cluster/creation.go index 0816301..8c4a75b 100644 --- a/e2e/cluster/creation.go +++ b/e2e/cluster/creation.go @@ -26,18 +26,16 @@ var _ = ginkgo.Describe(lifecycleTestName, // This test validates the end-to-end cluster lifecycle workflow: // 1. Cluster creation via API with initial condition validation - // 2. Workflow processing and Ready condition transition - // 3. Adapter execution with comprehensive metadata validation - // 4. Final cluster state verification + // 2. Required adapter execution with comprehensive metadata validation + // 3. Final cluster state verification (Ready and Available conditions) ginkgo.It("should validate complete workflow for clusters resource type from creation to Ready state", func(ctx context.Context) { - ginkgo.By("Submit a \"clusters\" resource type request via API") + ginkgo.By("Submit an API request to create a Cluster resource") cluster, err := h.Client.CreateClusterFromPayload(ctx, "testdata/payloads/clusters/cluster-request.json") Expect(err).NotTo(HaveOccurred(), "failed to create cluster") Expect(cluster.Id).NotTo(BeNil(), "cluster ID should be generated") clusterID = *cluster.Id ginkgo.GinkgoWriter.Printf("Created cluster ID: %s\n", clusterID) - Expect(cluster.Status).NotTo(BeNil(), "cluster status should be present") ginkgo.By("Verify initial status of cluster") @@ -49,30 +47,30 @@ var _ = ginkgo.Describe(lifecycleTestName, "initial cluster conditions should have Ready=False") hasAvailableFalse := h.HasResourceCondition(cluster.Status.Conditions, - "Available", openapi.ResourceConditionStatusFalse) + client.ConditionTypeAvailable, openapi.ResourceConditionStatusFalse) Expect(hasAvailableFalse).To(BeTrue(), "initial cluster conditions should have Available=False") - ginkgo.By("Monitor cluster workflow processing") - err = h.WaitForClusterCondition( - ctx, - clusterID, - client.ConditionTypeReady, - openapi.ResourceConditionStatusTrue, - h.Cfg.Timeouts.Cluster.Ready, - ) - Expect(err).NotTo(HaveOccurred(), "cluster Ready condition should transition to True") - - ginkgo.By("Verify adapter execution results") - // Validate all adapters that executed have completed successfully - // The cluster Ready condition ensures all required adapters have finished + ginkgo.By("Verify required adapter execution results") + // Validate required adapters from config have completed successfully + // If an adapter fails, we can identify which specific adapter failed Eventually(func(g Gomega) { statuses, err := h.Client.GetClusterStatuses(ctx, clusterID) g.Expect(err).NotTo(HaveOccurred(), "failed to get cluster statuses") g.Expect(statuses.Items).NotTo(BeEmpty(), "at least one adapter should have executed") - // Validate each adapter has required conditions with correct status + // Build a map of adapter statuses for easy lookup + adapterMap := make(map[string]openapi.AdapterStatus) for _, adapter := range statuses.Items { + adapterMap[adapter.Adapter] = adapter + } + + // Validate each required adapter from config + for _, requiredAdapter 
:= range h.Cfg.Adapters.Cluster { + adapter, exists := adapterMap[requiredAdapter] + g.Expect(exists).To(BeTrue(), + "required adapter %s should be present in adapter statuses", requiredAdapter) + // Validate adapter-level metadata g.Expect(adapter.CreatedTime).NotTo(BeZero(), "adapter %s should have valid created_time", adapter.Adapter) @@ -124,8 +122,17 @@ var _ = ginkgo.Describe(lifecycleTestName, }, h.Cfg.Timeouts.Adapter.Processing, h.Cfg.Polling.Interval).Should(Succeed()) ginkgo.By("Verify final cluster state") - // Retrieve the final cluster state and confirm both Ready and Available conditions are True + // Wait for cluster Ready condition and verify both Ready and Available conditions are True // This confirms the cluster has reached the desired end state + err = h.WaitForClusterCondition( + ctx, + clusterID, + client.ConditionTypeReady, + openapi.ResourceConditionStatusTrue, + h.Cfg.Timeouts.Cluster.Ready, + ) + Expect(err).NotTo(HaveOccurred(), "cluster Ready condition should transition to True") + finalCluster, err := h.Client.GetCluster(ctx, clusterID) Expect(err).NotTo(HaveOccurred(), "failed to get final cluster state") Expect(finalCluster.Status).NotTo(BeNil(), "cluster status should be present") @@ -135,8 +142,16 @@ var _ = ginkgo.Describe(lifecycleTestName, Expect(hasReady).To(BeTrue(), "cluster should have Ready=True condition") hasAvailable := h.HasResourceCondition(finalCluster.Status.Conditions, - "Available", openapi.ResourceConditionStatusTrue) + client.ConditionTypeAvailable, openapi.ResourceConditionStatusTrue) Expect(hasAvailable).To(BeTrue(), "cluster should have Available=True condition") + + // Validate observedGeneration for Ready and Available conditions + for _, condition := range finalCluster.Status.Conditions { + if condition.Type == client.ConditionTypeReady || condition.Type == client.ConditionTypeAvailable { + Expect(condition.ObservedGeneration).To(Equal(int32(1)), + "cluster condition %s should have observed_generation=1 for new creation request", condition.Type) + } + } }) ginkgo.AfterEach(func(ctx context.Context) { diff --git a/pkg/config/config.go b/pkg/config/config.go index c545874..74e6e9d 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -1,326 +1,353 @@ package config import ( - "fmt" - "log/slog" - "net/url" - "reflect" - "time" + "fmt" + "log/slog" + "net/url" + "reflect" + "time" - "github.com/spf13/viper" + "github.com/spf13/viper" ) const ( - // TagMapstructure is the struct tag used by Viper for configuration mapping - TagMapstructure = "mapstructure" + // TagMapstructure is the struct tag used by Viper for configuration mapping + TagMapstructure = "mapstructure" - // EnvPrefix is the prefix for all environment variables (without trailing underscore) - // Viper automatically adds underscore when using SetEnvPrefix() - EnvPrefix = "HYPERFLEET" + // EnvPrefix is the prefix for all environment variables (without trailing underscore) + // Viper automatically adds underscore when using SetEnvPrefix() + EnvPrefix = "HYPERFLEET" - // RedactedPlaceholder is used to mask sensitive information in logs - RedactedPlaceholder = "**REDACTED**" + // RedactedPlaceholder is used to mask sensitive information in logs + RedactedPlaceholder = "**REDACTED**" - // NotSetPlaceholder indicates a configuration value has not been set - NotSetPlaceholder = "" + // NotSetPlaceholder indicates a configuration value has not been set + NotSetPlaceholder = "" ) // EnvVar constructs an environment variable name with the HYPERFLEET prefix // Example: 
EnvVar("LOG_LEVEL") returns "HYPERFLEET_LOG_LEVEL" func EnvVar(name string) string { - return EnvPrefix + "_" + name + return EnvPrefix + "_" + name } // API config keys var API = struct { - // URL is the HyperFleet API base URL - // Env: HYPERFLEET_API_URL - URL string + // URL is the HyperFleet API base URL + // Env: HYPERFLEET_API_URL + URL string }{ - URL: "api.url", + URL: "api.url", } // Tests config keys for Ginkgo test execution var Tests = struct { - // GinkgoLabelFilter is the label filter for Ginkgo tests - // Env: GINKGO_LABEL_FILTER - GinkgoLabelFilter string + // GinkgoLabelFilter is the label filter for Ginkgo tests + // Env: GINKGO_LABEL_FILTER + GinkgoLabelFilter string - // GinkgoFocus is a regex to focus on specific tests - // Env: GINKGO_FOCUS - GinkgoFocus string + // GinkgoFocus is a regex to focus on specific tests + // Env: GINKGO_FOCUS + GinkgoFocus string - // GinkgoSkip is a regex to skip specific tests - // Env: GINKGO_SKIP - GinkgoSkip string + // GinkgoSkip is a regex to skip specific tests + // Env: GINKGO_SKIP + GinkgoSkip string - // SuiteTimeout is the timeout for the entire test suite (Go duration format: "2h", "90m", etc.) - // Env: SUITE_TIMEOUT - SuiteTimeout string + // SuiteTimeout is the timeout for the entire test suite (Go duration format: "2h", "90m", etc.) + // Env: SUITE_TIMEOUT + SuiteTimeout string - // JUnitReportPath is the path to write JUnit XML report - // Env: JUNIT_REPORT_PATH - JUnitReportPath string + // JUnitReportPath is the path to write JUnit XML report + // Env: JUNIT_REPORT_PATH + JUnitReportPath string }{ - GinkgoLabelFilter: "tests.ginkgoLabelFilter", - GinkgoFocus: "tests.focus", - GinkgoSkip: "tests.ginkgoSkip", - SuiteTimeout: "tests.suiteTimeout", - JUnitReportPath: "tests.junitReportPath", + GinkgoLabelFilter: "tests.ginkgoLabelFilter", + GinkgoFocus: "tests.focus", + GinkgoSkip: "tests.ginkgoSkip", + SuiteTimeout: "tests.suiteTimeout", + JUnitReportPath: "tests.junitReportPath", } // Log config keys var Log = struct { - // Level is the minimum log level - // Env: HYPERFLEET_LOG_LEVEL - Level string + // Level is the minimum log level + // Env: HYPERFLEET_LOG_LEVEL + Level string - // Format is the log output format - // Env: HYPERFLEET_LOG_FORMAT - Format string + // Format is the log output format + // Env: HYPERFLEET_LOG_FORMAT + Format string - // Output is the log destination - // Env: HYPERFLEET_LOG_OUTPUT - Output string + // Output is the log destination + // Env: HYPERFLEET_LOG_OUTPUT + Output string }{ - Level: "log.level", - Format: "log.format", - Output: "log.output", + Level: "log.level", + Format: "log.format", + Output: "log.output", +} + +// AdaptersConfig contains required adapters for each resource type +type AdaptersConfig struct { + Cluster []string `yaml:"cluster" mapstructure:"cluster"` // Required adapters for cluster resources + NodePool []string `yaml:"nodepool" mapstructure:"nodepool"` // Required adapters for nodepool resources } // Config represents the e2e test configuration type Config struct { - API APIConfig `yaml:"api" mapstructure:"api"` - Timeouts TimeoutsConfig `yaml:"timeouts" mapstructure:"timeouts"` - Polling PollingConfig `yaml:"polling" mapstructure:"polling"` - Log LogConfig `yaml:"log" mapstructure:"log"` + API APIConfig `yaml:"api" mapstructure:"api"` + Timeouts TimeoutsConfig `yaml:"timeouts" mapstructure:"timeouts"` + Polling PollingConfig `yaml:"polling" mapstructure:"polling"` + Log LogConfig `yaml:"log" mapstructure:"log"` + Adapters AdaptersConfig `yaml:"adapters" 
mapstructure:"adapters"` } // APIConfig contains API-related configuration type APIConfig struct { - URL string `yaml:"url" mapstructure:"url"` + URL string `yaml:"url" mapstructure:"url"` } // TimeoutsConfig contains timeout configurations type TimeoutsConfig struct { - Cluster ClusterTimeouts `yaml:"cluster" mapstructure:"cluster"` - NodePool NodePoolTimeouts `yaml:"nodepool" mapstructure:"nodepool"` - Adapter AdapterTimeouts `yaml:"adapter" mapstructure:"adapter"` + Cluster ClusterTimeouts `yaml:"cluster" mapstructure:"cluster"` + NodePool NodePoolTimeouts `yaml:"nodepool" mapstructure:"nodepool"` + Adapter AdapterTimeouts `yaml:"adapter" mapstructure:"adapter"` } // ClusterTimeouts contains cluster-related timeouts type ClusterTimeouts struct { - Ready time.Duration `yaml:"ready" mapstructure:"ready"` + Ready time.Duration `yaml:"ready" mapstructure:"ready"` } // NodePoolTimeouts contains nodepool-related timeouts type NodePoolTimeouts struct { - Ready time.Duration `yaml:"ready" mapstructure:"ready"` + Ready time.Duration `yaml:"ready" mapstructure:"ready"` } // AdapterTimeouts contains adapter-related timeouts type AdapterTimeouts struct { - Processing time.Duration `yaml:"processing" mapstructure:"processing"` + Processing time.Duration `yaml:"processing" mapstructure:"processing"` } // PollingConfig contains polling configuration type PollingConfig struct { - Interval time.Duration `yaml:"interval" mapstructure:"interval"` + Interval time.Duration `yaml:"interval" mapstructure:"interval"` } // LogConfig contains logging configuration type LogConfig struct { - Level string `yaml:"level" mapstructure:"level"` // debug, info, warn, error - Format string `yaml:"format" mapstructure:"format"` // text, json - Output string `yaml:"output" mapstructure:"output"` // stdout, stderr + Level string `yaml:"level" mapstructure:"level"` // debug, info, warn, error + Format string `yaml:"format" mapstructure:"format"` // text, json + Output string `yaml:"output" mapstructure:"output"` // stdout, stderr } // Load loads configuration from viper with improved validation func Load() (*Config, error) { - cfg := &Config{} + cfg := &Config{} - // Use Unmarshal (not UnmarshalExact) to allow runtime test parameters (tests.*) - // to coexist with persistent configuration. Test parameters (label-filter, focus, skip) - // are set via flags/env vars and should not appear in config files. - if err := viper.Unmarshal(cfg); err != nil { - return nil, fmt.Errorf("configuration error: %w\nPlease check your config file", err) - } + // Use Unmarshal (not UnmarshalExact) to allow runtime test parameters (tests.*) + // to coexist with persistent configuration. Test parameters (label-filter, focus, skip) + // are set via flags/env vars and should not appear in config files. 
+ if err := viper.Unmarshal(cfg); err != nil { + return nil, fmt.Errorf("configuration error: %w\nPlease check your config file", err) + } - // WORKAROUND: viper.Unmarshal doesn't always respect env var bindings for nested structs - // Use reflection to automatically apply all values from viper to the config struct - applyViperValues(reflect.ValueOf(cfg).Elem(), "") + // WORKAROUND: viper.Unmarshal doesn't always respect env var bindings for nested structs + // Use reflection to automatically apply all values from viper to the config struct + applyViperValues(reflect.ValueOf(cfg).Elem(), "") - // Apply defaults - cfg.applyDefaults() + // Apply defaults + cfg.applyDefaults() - // Validate with detailed errors - if err := cfg.Validate(); err != nil { - return nil, err - } + // Validate with detailed errors + if err := cfg.Validate(); err != nil { + return nil, err + } - // Note: Display() is called after logger initialization in e2e.RunTests() - // to ensure structured logging is properly configured + // Note: Display() is called after logger initialization in e2e.RunTests() + // to ensure structured logging is properly configured - return cfg, nil + return cfg, nil } // applyViperValues recursively applies values from viper to the config struct using reflection // This ensures environment variables and flags properly override config file values func applyViperValues(v reflect.Value, prefix string) { - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - fieldType := t.Field(i) - - tag := fieldType.Tag.Get(TagMapstructure) - if tag == "" { - continue - } - - var configPath string - if prefix == "" { - configPath = tag - } else { - configPath = prefix + "." + tag - } - - if field.Kind() == reflect.Struct && field.Type() != reflect.TypeOf(time.Duration(0)) { - applyViperValues(field, configPath) - continue - } - - if !field.CanSet() { - continue - } - - // Apply value from viper based on field type - switch field.Kind() { - case reflect.String: - if viperVal := viper.GetString(configPath); viperVal != "" { - field.SetString(viperVal) - } - case reflect.Bool: - // For bool, only apply if the key is explicitly set in viper - // This preserves the priority order: flags > env > config > defaults - if viper.IsSet(configPath) { - field.SetBool(viper.GetBool(configPath)) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - // Special handling for time.Duration (which is int64) - if field.Type() == reflect.TypeOf(time.Duration(0)) { - if viperVal := viper.GetDuration(configPath); viperVal != 0 { - field.SetInt(int64(viperVal)) - } - } else { - if viperVal := viper.GetInt64(configPath); viperVal != 0 { - field.SetInt(viperVal) - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if viperVal := viper.GetUint64(configPath); viperVal != 0 { - field.SetUint(viperVal) - } - case reflect.Float32, reflect.Float64: - if viperVal := viper.GetFloat64(configPath); viperVal != 0 { - field.SetFloat(viperVal) - } - } - } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + fieldType := t.Field(i) + + tag := fieldType.Tag.Get(TagMapstructure) + if tag == "" { + continue + } + + var configPath string + if prefix == "" { + configPath = tag + } else { + configPath = prefix + "." 
+ tag + } + + if field.Kind() == reflect.Struct && field.Type() != reflect.TypeOf(time.Duration(0)) { + applyViperValues(field, configPath) + continue + } + + if !field.CanSet() { + continue + } + + // Apply value from viper based on field type + switch field.Kind() { + case reflect.String: + if viperVal := viper.GetString(configPath); viperVal != "" { + field.SetString(viperVal) + } + case reflect.Bool: + // For bool, only apply if the key is explicitly set in viper + // This preserves the priority order: flags > env > config > defaults + if viper.IsSet(configPath) { + field.SetBool(viper.GetBool(configPath)) + } + case reflect.Slice: + // Handle string slices + if field.Type().Elem().Kind() == reflect.String { + if viper.IsSet(configPath) { + viperVal := viper.GetStringSlice(configPath) + if len(viperVal) > 0 { + field.Set(reflect.ValueOf(viperVal)) + } + } + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // Special handling for time.Duration (which is int64) + if field.Type() == reflect.TypeOf(time.Duration(0)) { + if viperVal := viper.GetDuration(configPath); viperVal != 0 { + field.SetInt(int64(viperVal)) + } + } else { + if viperVal := viper.GetInt64(configPath); viperVal != 0 { + field.SetInt(viperVal) + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if viperVal := viper.GetUint64(configPath); viperVal != 0 { + field.SetUint(viperVal) + } + case reflect.Float32, reflect.Float64: + if viperVal := viper.GetFloat64(configPath); viperVal != 0 { + field.SetFloat(viperVal) + } + } + } } // applyDefaults applies default values for unset fields func (c *Config) applyDefaults() { - // Apply timeout defaults - if c.Timeouts.Cluster.Ready == 0 { - c.Timeouts.Cluster.Ready = DefaultClusterReadyTimeout - } - if c.Timeouts.NodePool.Ready == 0 { - c.Timeouts.NodePool.Ready = DefaultNodePoolReadyTimeout - } - if c.Timeouts.Adapter.Processing == 0 { - c.Timeouts.Adapter.Processing = DefaultAdapterProcessingTimeout - } - if c.Polling.Interval == 0 { - c.Polling.Interval = DefaultPollInterval - } - - // Apply log defaults - if c.Log.Level == "" { - c.Log.Level = DefaultLogLevel - } - if c.Log.Format == "" { - c.Log.Format = DefaultLogFormat - } - if c.Log.Output == "" { - c.Log.Output = DefaultLogOutput - } + // Apply timeout defaults + if c.Timeouts.Cluster.Ready == 0 { + c.Timeouts.Cluster.Ready = DefaultClusterReadyTimeout + } + if c.Timeouts.NodePool.Ready == 0 { + c.Timeouts.NodePool.Ready = DefaultNodePoolReadyTimeout + } + if c.Timeouts.Adapter.Processing == 0 { + c.Timeouts.Adapter.Processing = DefaultAdapterProcessingTimeout + } + if c.Polling.Interval == 0 { + c.Polling.Interval = DefaultPollInterval + } + + // Apply log defaults + if c.Log.Level == "" { + c.Log.Level = DefaultLogLevel + } + if c.Log.Format == "" { + c.Log.Format = DefaultLogFormat + } + if c.Log.Output == "" { + c.Log.Output = DefaultLogOutput + } + + // Apply adapter defaults + if c.Adapters.Cluster == nil { + c.Adapters.Cluster = DefaultClusterAdapters + } + if c.Adapters.NodePool == nil { + c.Adapters.NodePool = DefaultNodePoolAdapters + } } // Validate validates configuration with detailed error messages func (c *Config) Validate() error { - // Validate API URL requirement - if c.API.URL == "" { - return fmt.Errorf(`configuration validation failed: + // Validate API URL requirement + if c.API.URL == "" { + return fmt.Errorf(`configuration validation failed: - Field 'Config.API.URL' is required Please provide API URL (in order of priority): 
• Flag: --api-url • Environment variable: HYPERFLEET_API_URL • Config file: api.url: `) - } + } - return nil + return nil } // Display logs the merged configuration using structured logging func (c *Config) Display() { - slog.Info("Loaded configuration", - "api_url", redactURL(c.API.URL), - "timeout_cluster_ready", c.Timeouts.Cluster.Ready, - "timeout_nodepool_ready", c.Timeouts.NodePool.Ready, - "timeout_adapter_processing", c.Timeouts.Adapter.Processing, - "polling_interval", c.Polling.Interval, - "log_level", c.Log.Level, - "log_format", c.Log.Format, - "log_output", c.Log.Output, - ) + slog.Info("Loaded configuration", + "api_url", redactURL(c.API.URL), + "timeout_cluster_ready", c.Timeouts.Cluster.Ready, + "timeout_nodepool_ready", c.Timeouts.NodePool.Ready, + "timeout_adapter_processing", c.Timeouts.Adapter.Processing, + "polling_interval", c.Polling.Interval, + "log_level", c.Log.Level, + "log_format", c.Log.Format, + "log_output", c.Log.Output, + "adapters_cluster", c.Adapters.Cluster, + "adapters_nodepool", c.Adapters.NodePool, + ) } // redactURL redacts credentials from URLs func redactURL(rawURL string) string { - if rawURL == "" { - return NotSetPlaceholder - } - - // Parse the URL to safely handle credentials - u, err := url.Parse(rawURL) - if err != nil { - // If parsing fails, redact entirely for safety - return RedactedPlaceholder - } - - // If URL contains user credentials, redact them - if u.User != nil { - // Clear the User field and manually build the redacted URL - u.User = nil - redactedURL := u.String() - - // Insert RedactedPlaceholder after the scheme:// - if u.Scheme != "" { - redactedURL = u.Scheme + "://" + RedactedPlaceholder + "@" + u.Host - if u.Path != "" { - redactedURL += u.Path - } - if u.RawQuery != "" { - redactedURL += "?" + u.RawQuery - } - if u.Fragment != "" { - redactedURL += "#" + u.Fragment - } - } - return redactedURL - } - - // Return the URL as-is if no credentials present - return u.String() + if rawURL == "" { + return NotSetPlaceholder + } + + // Parse the URL to safely handle credentials + u, err := url.Parse(rawURL) + if err != nil { + // If parsing fails, redact entirely for safety + return RedactedPlaceholder + } + + // If URL contains user credentials, redact them + if u.User != nil { + // Clear the User field and manually build the redacted URL + u.User = nil + redactedURL := u.String() + + // Insert RedactedPlaceholder after the scheme:// + if u.Scheme != "" { + redactedURL = u.Scheme + "://" + RedactedPlaceholder + "@" + u.Host + if u.Path != "" { + redactedURL += u.Path + } + if u.RawQuery != "" { + redactedURL += "?" 
+ u.RawQuery + } + if u.Fragment != "" { + redactedURL += "#" + u.Fragment + } + } + return redactedURL + } + + // Return the URL as-is if no credentials present + return u.String() } diff --git a/pkg/config/defaults.go b/pkg/config/defaults.go index e1cc7ae..b7a5344 100644 --- a/pkg/config/defaults.go +++ b/pkg/config/defaults.go @@ -4,51 +4,66 @@ import "time" // Log level constants const ( - // LogLevelDebug enables detailed test steps and all framework internal logs - LogLevelDebug = "debug" + // LogLevelDebug enables detailed test steps and all framework internal logs + LogLevelDebug = "debug" - // LogLevelInfo enables detailed test steps and high-level framework logs (default) - LogLevelInfo = "info" + // LogLevelInfo enables detailed test steps and high-level framework logs (default) + LogLevelInfo = "info" - // LogLevelWarn shows only warnings and errors (minimal output for CI/CD) - LogLevelWarn = "warn" + // LogLevelWarn shows only warnings and errors (minimal output for CI/CD) + LogLevelWarn = "warn" - // LogLevelError shows only errors (absolute minimal output) - LogLevelError = "error" + // LogLevelError shows only errors (absolute minimal output) + LogLevelError = "error" ) // Log format constants const ( - LogFormatJSON = "json" - LogFormatText = "text" + LogFormatJSON = "json" + LogFormatText = "text" ) // Log output constants const ( - LogOutputStdout = "stdout" - LogOutputStderr = "stderr" + LogOutputStdout = "stdout" + LogOutputStderr = "stderr" ) // Default timeout values const ( - // DefaultClusterReadyTimeout is the default timeout for waiting for a cluster to become ready - DefaultClusterReadyTimeout = 30 * time.Minute + // DefaultClusterReadyTimeout is the default timeout for waiting for a cluster to become ready + DefaultClusterReadyTimeout = 30 * time.Minute - // DefaultNodePoolReadyTimeout is the default timeout for waiting for a nodepool to become ready - DefaultNodePoolReadyTimeout = 30 * time.Minute + // DefaultNodePoolReadyTimeout is the default timeout for waiting for a nodepool to become ready + DefaultNodePoolReadyTimeout = 30 * time.Minute - // DefaultAdapterProcessingTimeout is the default timeout for waiting for adapter conditions - DefaultAdapterProcessingTimeout = 5 * time.Minute + // DefaultAdapterProcessingTimeout is the default timeout for waiting for adapter conditions + DefaultAdapterProcessingTimeout = 5 * time.Minute - // DefaultPollInterval is the default interval for polling operations - DefaultPollInterval = 10 * time.Second + // DefaultPollInterval is the default interval for polling operations + DefaultPollInterval = 10 * time.Second - // DefaultLogLevel is the default log level - DefaultLogLevel = LogLevelInfo + // DefaultLogLevel is the default log level + DefaultLogLevel = LogLevelInfo - // DefaultLogFormat is the default log format - DefaultLogFormat = LogFormatText + // DefaultLogFormat is the default log format + DefaultLogFormat = LogFormatText - // DefaultLogOutput is the default log output - DefaultLogOutput = LogOutputStdout + // DefaultLogOutput is the default log output + DefaultLogOutput = LogOutputStdout +) + +// Default required adapters for resource types +var ( + // DefaultClusterAdapters is the default list of required adapters for cluster resources + DefaultClusterAdapters = []string{ + "clusters-namespace", + "clusters-job", + "clusters-deployment", + } + + // DefaultNodePoolAdapters is the default list of required adapters for nodepool resources + DefaultNodePoolAdapters = []string{ + "nodepools-configmap", + } ) diff 
--git a/test-design/testcases/cluster.md b/test-design/testcases/cluster.md
index 9575d46..8006cb8 100644
--- a/test-design/testcases/cluster.md
+++ b/test-design/testcases/cluster.md
@@ -11,7 +11,7 @@
 
 ### Description
 
-This test validates that the workflow can work correctly for clusters resource type. It verifies that when a cluster resource is created via the HyperFleet API, the system correctly processes the resource through its lifecycle, configured adapters execute successfully, and accurately reports status transitions back to the API. The test ensures the complete workflow of CLM can successfully handle clusters resource type requests end-to-end.
+This test validates that the workflow works correctly for the clusters resource type. It verifies that when a cluster resource is created via the HyperFleet API, the system correctly processes the resource through its lifecycle, that the required adapters (configured in the test config) execute successfully, and that status transitions are accurately reported back to the API. The test validates the required adapters first so that specific adapter failures can be identified, then confirms the cluster reaches the final Ready and Available state. This approach ensures the complete CLM workflow can handle clusters resource type requests end-to-end.
 
 ---
 
@@ -19,11 +19,11 @@ This test validates that the workflow can work correctly for clusters resource t
 |-----------|---------------|
 | **Pos/Neg** | Positive |
 | **Priority** | Tier0 |
-| **Status** | Draft |
-| **Automation** | Not Automated |
+| **Status** | Automated |
+| **Automation** | Automated |
 | **Version** | MVP |
 | **Created** | 2026-01-29 |
-| **Updated** | 2026-02-04 |
+| **Updated** | 2026-02-09 |
 
 ---
 
@@ -38,10 +38,10 @@ This test validates that the workflow can work correctly for clusters resource t
 
 ### Test Steps
 
-#### Step 1: Submit a "clusters" resource type request via API
+#### Step 1: Submit an API request to create a Cluster resource
 
 **Action:**
-- Submit a POST request for "clusters" resource type:
+- Submit a POST request to create a Cluster resource:
 ```bash
 curl -X POST ${API_URL}/api/hyperfleet/v1/clusters \
   -H "Content-Type: application/json" \
@@ -63,19 +63,7 @@ curl -X GET ${API_URL}/api/hyperfleet/v1/clusters/{cluster_id}
 - Cluster `Ready` condition `status: False`
 - Cluster `Available` condition `status: False`
 
-#### Step 3: Monitor cluster workflow processing
-
-**Action:**
-- Poll cluster status to monitor workflow processing:
-```bash
-curl -X GET ${API_URL}/api/hyperfleet/v1/clusters/{cluster_id}
-```
-
-**Expected Result:**
-- Cluster `Ready` condition transitions from `status: False` to `status: True`
-- This indicates the workflow has processed the cluster request and configured adapters are executing
-
-#### Step 4: Verify adapter execution results
+#### Step 3: Verify required adapter execution results
 
 **Action:**
 - Retrieve adapter statuses information:
@@ -85,27 +73,38 @@ curl -X GET ${API_URL}/api/hyperfleet/v1/clusters/{cluster_id}/statuses
 
 **Expected Result:**
 - Response returns HTTP 200 (OK) status code
-- Each adapter has all required condition types: `Applied`, `Available`, `Health`
+- All required adapters from config are present in the response:
+  - `clusters-namespace`
+  - `clusters-job`
+  - `clusters-deployment`
+- Each required adapter has all required condition types: `Applied`, `Available`, `Health`
 - Each condition has `status: "True"` indicating successful execution
 - **Adapter condition metadata validation** (for each condition in adapter.conditions):
  - `reason`: 
Non-empty string providing human-readable summary of the condition state - `message`: Non-empty string with detailed human-readable description - `last_transition_time`: Valid RFC3339 timestamp of the last status change -- **Adapter status metadata validation** (for each adapter): +- **Adapter status metadata validation** (for each required adapter): - `created_time`: Valid RFC3339 timestamp when the adapter status was first created - `last_report_time`: Valid RFC3339 timestamp when the adapter last reported its status - `observed_generation`: Non-nil integer value equal to 1 for new creation requests -#### Step 5: Verify final cluster state +**Note:** Required adapters are configurable via: +- Config file: `configs/config.yaml` under `adapters.cluster` +- Environment variable: `HYPERFLEET_ADAPTERS_CLUSTER` (comma-separated list) + +#### Step 4: Verify final cluster state **Action:** +- Wait for cluster Ready condition to transition to True - Retrieve final cluster status information: ```bash curl -X GET ${API_URL}/api/hyperfleet/v1/clusters/{cluster_id} ``` **Expected Result:** +- Cluster `Ready` condition transitions from `status: False` to `status: True` - Final cluster conditions have `status: True` for both condition `{"type": "Ready"}` and `{"type": "Available"}` +- Validate that the observedGeneration for the Ready and Available conditions is 1 for a new creation request - This confirms the cluster has reached the desired end state #### Step 5: Cleanup resources @@ -157,9 +156,9 @@ This test verifies that the Kubernetes resources (namespace and job) can be succ ### Test Steps -#### Step 1: Submit a "clusters" resource type request via API +#### Step 1: Submit an API request to create a Cluster resource **Action:** -- Execute cluster creation request: +- Submit a POST request to create a Cluster resource: ```bash curl -X POST ${API_URL}/api/hyperfleet/v1/clusters \ -H "Content-Type: application/json" \ @@ -225,9 +224,9 @@ This test validates that CLM correctly executes workflows with preinstalled depe ### Test Steps -#### Step 1: Submit a "clusters" resource type request via API +#### Step 1: Submit an API request to create a Cluster resource **Action:** -- Execute cluster creation request: +- Submit a POST request to create a Cluster resource: ```bash curl -X POST ${API_URL}/api/hyperfleet/v1/clusters \ -H "Content-Type: application/json" \ @@ -264,7 +263,7 @@ curl -X GET ${API_URL}/api/hyperfleet/v1/clusters/{cluster_id}/statuses - Monitor adapter execution progress **Expected Result:** -- Namespace is created by adapter 1 +- Adapter 1 creates Namespace - After adapter 2 executes completely, adapter 3 is in progress. - Adapters execute in correct order based on preconditions: 1. 
Adapter 1 (namespace-creator) completes first diff --git a/test-design/testcases/nodepool.md b/test-design/testcases/nodepool.md index 725fc45..c783973 100644 --- a/test-design/testcases/nodepool.md +++ b/test-design/testcases/nodepool.md @@ -40,9 +40,9 @@ This test validates that the workflow can work correctly for nodepools resource ### Test Steps -#### Step 1: Submit a "nodepools" resource type request via API +#### Step 1: Submit an API request to create a NodePool resource **Action:** -- Submit a POST request for "nodepools" resource type (with cluster_id in the payload): +- Submit a POST request to create a NodePool resource (with cluster_id in the payload): ```bash curl -X POST ${API_URL}/api/hyperfleet/v1/nodepools \ -H "Content-Type: application/json" \ @@ -136,9 +136,9 @@ This test verifies that the Kubernetes resources of different types (e.g., confi ### Test Steps -#### Step 1: Submit a "nodepools" resource type request via API +#### Step 1: Submit an API request to create a NodePool resource **Action:** -- Execute nodepool creation request (with cluster_id in the payload): +- Submit a POST request to create a NodePool resource (with cluster_id in the payload): ```bash curl -X POST ${API_URL}/api/hyperfleet/v1/nodepools \ -H "Content-Type: application/json" \ diff --git a/testdata/payloads/clusters/cluster-request.json b/testdata/payloads/clusters/cluster-request.json index 7a33d4d..c3b3672 100644 --- a/testdata/payloads/clusters/cluster-request.json +++ b/testdata/payloads/clusters/cluster-request.json @@ -3,6 +3,7 @@ "name": "hp-cluster-{{.Random}}", "labels": { "environment": "production", + "shard": "1", "team": "platform" }, "spec": {