diff --git a/README.md b/README.md
index 579f7d3..416e1de 100644
--- a/README.md
+++ b/README.md
@@ -79,23 +79,38 @@ make fmt
 ```
 hyperfleet-adapter/
 ├── cmd/
-│   └── adapter/             # Main application entry point
+│   └── adapter/             # Main application entry point
 ├── pkg/
-│   ├── errors/              # Error handling utilities
-│   └── logger/              # Structured logging with context support
+│   ├── constants/           # Shared constants (annotations, labels)
+│   ├── errors/              # Error handling utilities
+│   ├── health/              # Health and metrics servers
+│   ├── logger/              # Structured logging with context support
+│   ├── otel/                # OpenTelemetry tracing utilities
+│   ├── utils/               # General utility functions
+│   └── version/             # Version information
 ├── internal/
-│   ├── broker_consumer/     # Message broker consumer implementations
-│   ├── config_loader/       # Configuration loading logic
-│   ├── criteria/            # Precondition and CEL evaluation
-│   ├── executor/            # Event execution engine
-│   ├── hyperfleet_api/      # HyperFleet API client
-│   └── k8s_client/          # Kubernetes client wrapper
-├── test/                    # Integration tests
-├── charts/                  # Helm chart for Kubernetes deployment
-├── Dockerfile               # Multi-stage Docker build
-├── Makefile                 # Build and test automation
-├── go.mod                   # Go module dependencies
-└── README.md                # This file
+│   ├── config_loader/       # Configuration loading and validation
+│   ├── criteria/            # Precondition and CEL evaluation
+│   ├── executor/            # Event execution engine (phases pipeline)
+│   ├── hyperfleet_api/      # HyperFleet API client
+│   ├── k8s_client/          # Kubernetes client wrapper
+│   ├── maestro_client/      # Maestro/OCM ManifestWork client
+│   ├── manifest/            # Manifest utilities (generation, rendering)
+│   └── resource_applier/    # ResourceApplier interface (unified apply)
+├── test/
+│   └── integration/         # Integration tests
+│       ├── config-loader/   # Config loader integration tests
+│       ├── executor/        # Executor integration tests
+│       ├── k8s_client/      # K8s client integration tests
+│       ├── maestro_client/  # Maestro client integration tests
+│       └── testutil/        # Test utilities
+├── charts/                  # Helm chart for Kubernetes deployment
+├── configs/                 # Configuration templates and examples
+├── scripts/                 # Build and test scripts
+├── Dockerfile               # Multi-stage Docker build
+├── Makefile                 # Build and test automation
+├── go.mod                   # Go module dependencies
+└── README.md                # This file
 ```
 
 ### Available Make Targets
diff --git a/internal/config_loader/loader.go b/internal/config_loader/loader.go
index 30d3650..f09ae10 100644
--- a/internal/config_loader/loader.go
+++ b/internal/config_loader/loader.go
@@ -3,9 +3,8 @@ package config_loader
 import (
 	"fmt"
 	"os"
-	"path/filepath"
-	"strings"
 
+	"github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/utils"
 	"gopkg.in/yaml.v3"
 )
@@ -250,29 +249,7 @@ func loadYAMLFile(baseDir, refPath string) (map[string]interface{}, error) {
 // resolvePath resolves a relative path against the base directory and validates
 // that the resolved path does not escape the base directory.
+// This delegates to utils.ResolveSecurePath.
func resolvePath(baseDir, refPath string) (string, error) { - baseAbs, err := filepath.Abs(baseDir) - if err != nil { - return "", fmt.Errorf("failed to resolve base directory: %w", err) - } - baseClean := filepath.Clean(baseAbs) - - var targetPath string - if filepath.IsAbs(refPath) { - targetPath = filepath.Clean(refPath) - } else { - targetPath = filepath.Clean(filepath.Join(baseClean, refPath)) - } - - // Check if target path is within base directory - rel, err := filepath.Rel(baseClean, targetPath) - if err != nil { - return "", fmt.Errorf("path %q escapes base directory", refPath) - } - - if strings.HasPrefix(rel, "..") { - return "", fmt.Errorf("path %q escapes base directory", refPath) - } - - return targetPath, nil + return utils.ResolveSecurePath(baseDir, refPath) } diff --git a/internal/executor/executor.go b/internal/executor/executor.go index f5b4134..43cba88 100644 --- a/internal/executor/executor.go +++ b/internal/executor/executor.go @@ -23,25 +23,39 @@ func NewExecutor(config *ExecutorConfig) (*Executor, error) { return nil, err } + log := config.Logger + + // Create phases (each phase contains its own business logic) + paramExtractionPhase := NewParamExtractionPhase(config.Config, config.K8sClient, log) + preconditionsPhase := NewPreconditionsPhase(config.APIClient, config.Config, log) + resourcesPhase := NewResourcesPhase(config.K8sClient, config.Config, log) + postActionsPhase := NewPostActionsPhase(config.APIClient, config.Config, log) + + // Create pipeline with all phases + pipeline := NewPipeline(log, + paramExtractionPhase, + preconditionsPhase, + resourcesPhase, + postActionsPhase, + ) + return &Executor{ - config: config, - precondExecutor: newPreconditionExecutor(config), - resourceExecutor: newResourceExecutor(config), - postActionExecutor: newPostActionExecutor(config), - log: config.Logger, + config: config, + log: log, + pipeline: pipeline, + paramExtractionPhase: paramExtractionPhase, + preconditionsPhase: preconditionsPhase, + resourcesPhase: resourcesPhase, + postActionsPhase: postActionsPhase, }, nil } - func validateExecutorConfig(config *ExecutorConfig) error { if config == nil { return fmt.Errorf("config is required") } - if config.Config == nil { - return fmt.Errorf("config is required") - } - requiredFields := []string{ + "Config", "APIClient", "Logger", "K8sClient"} @@ -89,107 +103,15 @@ func (e *Executor) Execute(ctx context.Context, data interface{}) *ExecutionResu execCtx := NewExecutionContext(ctx, rawData, e.config.Config) - // Initialize execution result - result := &ExecutionResult{ - Status: StatusSuccess, - Params: make(map[string]interface{}), - Errors: make(map[ExecutionPhase]error), - CurrentPhase: PhaseParamExtraction, - } - e.log.Info(ctx, "Processing event") - // Phase 1: Parameter Extraction - e.log.Infof(ctx, "Phase %s: RUNNING", result.CurrentPhase) - if err := e.executeParamExtraction(execCtx); err != nil { - result.Status = StatusFailed - result.Errors[PhaseParamExtraction] = err - execCtx.SetError("ParameterExtractionFailed", err.Error()) - resErr := fmt.Errorf("resource execution failed: %w", err) - errCtx := logger.WithErrorField(ctx, resErr) - e.log.Errorf(errCtx, "Phase %s: FAILED", result.CurrentPhase) - return result - } - result.Params = execCtx.Params - e.log.Debugf(ctx, "Parameter extraction completed: extracted %d params", len(execCtx.Params)) - - // Phase 2: Preconditions - result.CurrentPhase = PhasePreconditions - preconditions := e.config.Config.Spec.Preconditions - e.log.Infof(ctx, "Phase %s: RUNNING - %d 
configured", result.CurrentPhase, len(preconditions)) - precondOutcome := e.precondExecutor.ExecuteAll(ctx, preconditions, execCtx) - result.PreconditionResults = precondOutcome.Results - - if precondOutcome.Error != nil { - // Process execution error: precondition evaluation failed - result.Status = StatusFailed - precondErr := fmt.Errorf("precondition evaluation failed: error=%w", precondOutcome.Error) - result.Errors[result.CurrentPhase] = precondErr - execCtx.SetError("PreconditionFailed", precondOutcome.Error.Error()) - errCtx := logger.WithErrorField(ctx, precondOutcome.Error) - e.log.Errorf(errCtx, "Phase %s: FAILED", result.CurrentPhase) - result.ResourcesSkipped = true - result.SkipReason = "PreconditionFailed" - execCtx.SetSkipped("PreconditionFailed", precondOutcome.Error.Error()) - // Continue to post actions for error reporting - } else if !precondOutcome.AllMatched { - // Business outcome: precondition not satisfied - result.ResourcesSkipped = true - result.SkipReason = precondOutcome.NotMetReason - execCtx.SetSkipped("PreconditionNotMet", precondOutcome.NotMetReason) - e.log.Infof(ctx, "Phase %s: SUCCESS - NOT_MET - %s", result.CurrentPhase, precondOutcome.NotMetReason) - } else { - // All preconditions matched - e.log.Infof(ctx, "Phase %s: SUCCESS - MET - %d passed", result.CurrentPhase, len(precondOutcome.Results)) - } + // Execute all phases through the pipeline + phaseResults := e.pipeline.Execute(ctx, execCtx) - // Phase 3: Resources (skip if preconditions not met or previous error) - result.CurrentPhase = PhaseResources - resources := e.config.Config.Spec.Resources - e.log.Infof(ctx, "Phase %s: RUNNING - %d configured", result.CurrentPhase, len(resources)) - if !result.ResourcesSkipped { - resourceResults, err := e.resourceExecutor.ExecuteAll(ctx, resources, execCtx) - result.ResourceResults = resourceResults - - if err != nil { - result.Status = StatusFailed - resErr := fmt.Errorf("resource execution failed: %w", err) - result.Errors[result.CurrentPhase] = resErr - execCtx.SetError("ResourceFailed", err.Error()) - errCtx := logger.WithErrorField(ctx, err) - e.log.Errorf(errCtx, "Phase %s: FAILED", result.CurrentPhase) - // Continue to post actions for error reporting - } else { - e.log.Infof(ctx, "Phase %s: SUCCESS - %d processed", result.CurrentPhase, len(resourceResults)) - } - } else { - e.log.Infof(ctx, "Phase %s: SKIPPED - %s", result.CurrentPhase, result.SkipReason) - } - - // Phase 4: Post Actions (always execute for error reporting) - result.CurrentPhase = PhasePostActions - postConfig := e.config.Config.Spec.Post - postActionCount := 0 - if postConfig != nil { - postActionCount = len(postConfig.PostActions) - } - e.log.Infof(ctx, "Phase %s: RUNNING - %d configured", result.CurrentPhase, postActionCount) - postResults, err := e.postActionExecutor.ExecuteAll(ctx, postConfig, execCtx) - result.PostActionResults = postResults - - if err != nil { - result.Status = StatusFailed - postErr := fmt.Errorf("post action execution failed: %w", err) - result.Errors[result.CurrentPhase] = postErr - errCtx := logger.WithErrorField(ctx, err) - e.log.Errorf(errCtx, "Phase %s: FAILED", result.CurrentPhase) - } else { - e.log.Infof(ctx, "Phase %s: SUCCESS - %d executed", result.CurrentPhase, len(postResults)) - } - - // Finalize - result.ExecutionContext = execCtx + // Build execution result from phase results + result := e.buildExecutionResult(phaseResults, execCtx) + // Log final status if result.Status == StatusSuccess { e.log.Infof(ctx, "Event execution finished: 
event_execution_status=success resources_skipped=%t reason=%s", result.ResourcesSkipped, result.SkipReason) } else { @@ -202,20 +124,53 @@ func (e *Executor) Execute(ctx context.Context, data interface{}) *ExecutionResu errCtx := logger.WithErrorField(ctx, combinedErr) e.log.Errorf(errCtx, "Event execution finished: event_execution_status=failed") } + return result } -// executeParamExtraction extracts parameters from the event and environment -func (e *Executor) executeParamExtraction(execCtx *ExecutionContext) error { - // Extract configured parameters - if err := extractConfigParams(e.config.Config, execCtx, e.config.K8sClient); err != nil { - return err +// buildExecutionResult converts phase results into the final ExecutionResult +func (e *Executor) buildExecutionResult(phaseResults []PhaseResult, execCtx *ExecutionContext) *ExecutionResult { + result := &ExecutionResult{ + Status: StatusSuccess, + Params: execCtx.Params, + Errors: make(map[ExecutionPhase]error), + ExecutionContext: execCtx, } - // Add metadata params - addMetadataParams(e.config.Config, execCtx) + // Track the last phase that ran + for _, pr := range phaseResults { + result.CurrentPhase = pr.Phase - return nil + if pr.Error != nil { + result.Status = StatusFailed + result.Errors[pr.Phase] = pr.Error + } + + if pr.Skipped && pr.Phase == PhaseResources { + result.ResourcesSkipped = true + result.SkipReason = pr.SkipReason + } + } + + // Collect results from individual phases + if e.preconditionsPhase != nil { + result.PreconditionResults = e.preconditionsPhase.Results() + // Update skip reason from preconditions outcome if available + if outcome := e.preconditionsPhase.Outcome(); outcome != nil && !outcome.AllMatched { + result.ResourcesSkipped = true + result.SkipReason = outcome.NotMetReason + } + } + + if e.resourcesPhase != nil { + result.ResourceResults = e.resourcesPhase.Results() + } + + if e.postActionsPhase != nil { + result.PostActionResults = e.postActionsPhase.Results() + } + + return result } // startTracedExecution creates an OTel span and adds trace context to logs. 
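Editor's note on the refactored entry point: `NewExecutor` now builds the four phases, wires them into a `Pipeline`, and `Execute` simply runs the pipeline and folds the `PhaseResult`s into an `ExecutionResult` via `buildExecutionResult`. The caller sketch below is a reading aid only and is hypothetical: the `handleEvent` function, the package it lives in, and the way `cfg`, `apiClient`, `k8sClient`, `log`, and `eventData` are obtained are assumptions, not part of this diff; only the `ExecutorConfig` fields and the `ExecutionResult` fields it uses appear in the code above.

```go
// Hypothetical caller wiring (assumed to live somewhere under cmd/adapter).
package adapter

import (
	"context"

	"github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader"
	"github.com/openshift-hyperfleet/hyperfleet-adapter/internal/executor"
	"github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api"
	"github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client"
	"github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger"
)

// handleEvent is an assumed name for whatever the broker consumer invokes per event.
func handleEvent(
	ctx context.Context,
	cfg *config_loader.Config,
	apiClient hyperfleet_api.Client,
	k8sClient k8s_client.K8sClient,
	log logger.Logger,
	eventData map[string]interface{}, // Execute accepts interface{}; a decoded event map is assumed here
) {
	// NewExecutor validates the four required fields and wires the phase pipeline.
	exec, err := executor.NewExecutor(&executor.ExecutorConfig{
		Config:    cfg,
		APIClient: apiClient,
		K8sClient: k8sClient,
		Logger:    log,
	})
	if err != nil {
		log.Errorf(ctx, "failed to create executor: %v", err)
		return
	}

	// Execute runs param extraction, preconditions, resources, and post-actions.
	result := exec.Execute(ctx, eventData)

	if result.ResourcesSkipped {
		log.Infof(ctx, "resources skipped: %s", result.SkipReason)
	}
	for phase, phaseErr := range result.Errors {
		log.Errorf(ctx, "phase %v failed: %v", phase, phaseErr)
	}
}
```

The per-phase details (`PreconditionResults`, `ResourceResults`, `PostActionResults`) are still exposed on the result; after this change they are sourced from the phases' `Results()` accessors instead of being accumulated inline in `Execute`.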
diff --git a/internal/executor/executor_test.go b/internal/executor/executor_test.go index 38f668b..0e31ddb 100644 --- a/internal/executor/executor_test.go +++ b/internal/executor/executor_test.go @@ -286,7 +286,7 @@ func TestExecute_ParamExtraction(t *testing.T) { } } -func TestParamExtractor(t *testing.T) { +func TestParamExtractionPhase(t *testing.T) { t.Setenv("TEST_ENV", "env-value") evt := event.New() @@ -361,8 +361,9 @@ func TestParamExtractor(t *testing.T) { }, } - // Extract params using pure function - err := extractConfigParams(config, execCtx, nil) + // Create ParamExtractionPhase and extract params + phase := NewParamExtractionPhase(config, nil, logger.NewTestLogger()) + err := phase.extractConfigParams(execCtx) if tt.expectError { assert.Error(t, err) diff --git a/internal/executor/param_extractor.go b/internal/executor/param_extractor.go deleted file mode 100644 index 87728a3..0000000 --- a/internal/executor/param_extractor.go +++ /dev/null @@ -1,362 +0,0 @@ -package executor - -import ( - "context" - "fmt" - "math" - "os" - "strconv" - "strings" - - "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" - "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" -) - -// ParamConfig interface allows extractConfigParams to work with both AdapterConfig and Config -type ParamConfig interface { - GetParams() []config_loader.Parameter - GetMetadata() config_loader.Metadata -} - -// extractConfigParams extracts all configured parameters and populates execCtx.Params -// This is a pure function that directly modifies execCtx for simplicity -func extractConfigParams(config ParamConfig, execCtx *ExecutionContext, k8sClient k8s_client.K8sClient) error { - for _, param := range config.GetParams() { - value, err := extractParam(execCtx.Ctx, param, execCtx.EventData, k8sClient) - if err != nil { - if param.Required { - return NewExecutorError(PhaseParamExtraction, param.Name, - fmt.Sprintf("failed to extract required parameter '%s' from source '%s'", param.Name, param.Source), err) - } - // Use default for non-required params if extraction fails - if param.Default != nil { - execCtx.Params[param.Name] = param.Default - } - continue - } - - // Apply default if value is nil or (for strings) empty - isEmpty := value == nil - if s, ok := value.(string); ok && s == "" { - isEmpty = true - } - if isEmpty && param.Default != nil { - value = param.Default - } - - // Apply type conversion if specified - if value != nil && param.Type != "" { - converted, convErr := convertParamType(value, param.Type) - if convErr != nil { - if param.Required { - return NewExecutorError(PhaseParamExtraction, param.Name, - fmt.Sprintf("failed to convert parameter '%s' to type '%s'", param.Name, param.Type), convErr) - } - // Use default for non-required params if conversion fails - if param.Default != nil { - execCtx.Params[param.Name] = param.Default - } - continue - } - value = converted - } - - if value != nil { - execCtx.Params[param.Name] = value - } - } - - return nil -} - -// extractParam extracts a single parameter based on its source -func extractParam(ctx context.Context, param config_loader.Parameter, eventData map[string]interface{}, k8sClient k8s_client.K8sClient) (interface{}, error) { - source := param.Source - - // Handle different source types - switch { - case strings.HasPrefix(source, "env."): - return extractFromEnv(source[4:]) - case strings.HasPrefix(source, "event."): - return extractFromEvent(source[6:], eventData) - case strings.HasPrefix(source, 
"secret."): - return extractFromSecret(ctx, source[7:], k8sClient) - case strings.HasPrefix(source, "configmap."): - return extractFromConfigMap(ctx, source[10:], k8sClient) - case source == "": - // No source specified, return default or nil - return param.Default, nil - default: - // Try to extract from event data directly - return extractFromEvent(source, eventData) - } -} - -// extractFromEnv extracts a value from environment variables -func extractFromEnv(envVar string) (interface{}, error) { - value, exists := os.LookupEnv(envVar) - if !exists { - return nil, fmt.Errorf("environment variable %s not set", envVar) - } - return value, nil -} - -// extractFromEvent extracts a value from event data using dot notation -func extractFromEvent(path string, eventData map[string]interface{}) (interface{}, error) { - parts := strings.Split(path, ".") - var current interface{} = eventData - - for i, part := range parts { - switch v := current.(type) { - case map[string]interface{}: - val, ok := v[part] - if !ok { - return nil, fmt.Errorf("field '%s' not found at path '%s'", part, strings.Join(parts[:i+1], ".")) - } - current = val - case map[interface{}]interface{}: - val, ok := v[part] - if !ok { - return nil, fmt.Errorf("field '%s' not found at path '%s'", part, strings.Join(parts[:i+1], ".")) - } - current = val - default: - return nil, fmt.Errorf("cannot access field '%s': parent is not a map (got %T)", part, current) - } - } - - return current, nil -} - -// extractFromSecret extracts a value from a Kubernetes Secret -// Format: secret... (namespace is required) -func extractFromSecret(ctx context.Context, path string, k8sClient k8s_client.K8sClient) (interface{}, error) { - if k8sClient == nil { - return nil, fmt.Errorf("kubernetes client not configured, cannot extract from secret") - } - - value, err := k8sClient.ExtractFromSecret(ctx, path) - if err != nil { - return nil, err - } - - return value, nil -} - -// extractFromConfigMap extracts a value from a Kubernetes ConfigMap -// Format: configmap... 
(namespace is required) -func extractFromConfigMap(ctx context.Context, path string, k8sClient k8s_client.K8sClient) (interface{}, error) { - if k8sClient == nil { - return nil, fmt.Errorf("kubernetes client not configured, cannot extract from configmap") - } - - value, err := k8sClient.ExtractFromConfigMap(ctx, path) - if err != nil { - return nil, err - } - - return value, nil -} - -// addMetadataParams adds adapter and event metadata to execCtx.Params -func addMetadataParams(config ParamConfig, execCtx *ExecutionContext) { - metadata := config.GetMetadata() - // Add metadata from adapter config - execCtx.Params["metadata"] = map[string]interface{}{ - "name": metadata.Name, - "labels": metadata.Labels, - } -} - -// convertParamType converts a value to the specified type -// Supported types: string, int, int64, float, float64, bool -func convertParamType(value interface{}, targetType string) (interface{}, error) { - // If value is already the target type, return as-is - switch targetType { - case "string": - return convertToString(value) - case "int", "int64": - return convertToInt64(value) - case "float", "float64": - return convertToFloat64(value) - case "bool": - return convertToBool(value) - default: - return nil, fmt.Errorf("unsupported type: %s (supported: string, int, int64, float, float64, bool)", targetType) - } -} - -// convertToString converts a value to string -// -//nolint:unparam // error kept for API consistency with convertToInt64 -func convertToString(value interface{}) (string, error) { - switch v := value.(type) { - case string: - return v, nil - case int, int8, int16, int32, int64: - return fmt.Sprintf("%d", v), nil - case uint, uint8, uint16, uint32, uint64: - return fmt.Sprintf("%d", v), nil - case float32: - return strconv.FormatFloat(float64(v), 'f', -1, 32), nil - case float64: - return strconv.FormatFloat(v, 'f', -1, 64), nil - case bool: - return strconv.FormatBool(v), nil - default: - return fmt.Sprintf("%v", v), nil - } -} - -// convertToInt64 converts a value to int64 -func convertToInt64(value interface{}) (int64, error) { - switch v := value.(type) { - case int: - return int64(v), nil - case uint64: - if v > math.MaxInt64 { - return 0, fmt.Errorf("uint64 value %d overflows int64", v) - } - return int64(v), nil - case int8: - return int64(v), nil - case int16: - return int64(v), nil - case int32: - return int64(v), nil - case int64: - return v, nil - case uint: - if v > uint(math.MaxInt64) { - return 0, fmt.Errorf("uint value %d overflows int64", v) - } - return int64(v), nil - case uint8: - return int64(v), nil - case uint16: - return int64(v), nil - case uint32: - return int64(v), nil - case float32: - return int64(v), nil - case float64: - return int64(v), nil - case string: - // Try parsing as int first - if i, err := strconv.ParseInt(v, 10, 64); err == nil { - return i, nil - } - // Try parsing as float and convert - if f, err := strconv.ParseFloat(v, 64); err == nil { - return int64(f), nil - } - return 0, fmt.Errorf("cannot convert string '%s' to int", v) - case bool: - if v { - return 1, nil - } - return 0, nil - default: - return 0, fmt.Errorf("cannot convert %T to int", value) - } -} - -// convertToFloat64 converts a value to float64 -func convertToFloat64(value interface{}) (float64, error) { - switch v := value.(type) { - case float32: - return float64(v), nil - case float64: - return v, nil - case int: - return float64(v), nil - case int8: - return float64(v), nil - case int16: - return float64(v), nil - case int32: - return float64(v), nil - 
case int64: - return float64(v), nil - case uint: - return float64(v), nil - case uint8: - return float64(v), nil - case uint16: - return float64(v), nil - case uint32: - return float64(v), nil - case uint64: - return float64(v), nil - case string: - f, err := strconv.ParseFloat(v, 64) - if err != nil { - return 0, fmt.Errorf("cannot convert string '%s' to float: %w", v, err) - } - return f, nil - case bool: - if v { - return 1.0, nil - } - return 0.0, nil - default: - return 0, fmt.Errorf("cannot convert %T to float", value) - } -} - -// convertToBool converts a value to bool -func convertToBool(value interface{}) (bool, error) { - switch v := value.(type) { - case bool: - return v, nil - case string: - // Empty string is treated as false - if v == "" { - return false, nil - } - b, err := strconv.ParseBool(v) - if err != nil { - // Handle common truthy/falsy strings - lower := strings.ToLower(v) - switch lower { - case "yes", "y", "on", "1": - return true, nil - case "no", "n", "off", "0": - return false, nil - } - return false, fmt.Errorf("cannot convert string '%s' to bool", v) - } - return b, nil - // NOTE: Each numeric type needs its own case arm. In Go type switches, combined - // cases like "case int, int8, int16:" keep v as interface{}, so "v != 0" would - // compare interface{}(int8(0)) with interface{}(int(0)) - different types that - // are never equal, causing int8(0) to incorrectly return true. - // With separate arms, v is bound to the concrete type, enabling correct comparison. - case int: - return v != 0, nil - case int8: - return v != 0, nil - case int16: - return v != 0, nil - case int32: - return v != 0, nil - case int64: - return v != 0, nil - case uint: - return v != 0, nil - case uint8: - return v != 0, nil - case uint16: - return v != 0, nil - case uint32: - return v != 0, nil - case uint64: - return v != 0, nil - case float32: - return v != 0, nil - case float64: - return v != 0, nil - default: - return false, fmt.Errorf("cannot convert %T to bool", value) - } -} diff --git a/internal/executor/param_extractor_test.go b/internal/executor/param_extractor_test.go index 3703ee5..34acb4e 100644 --- a/internal/executor/param_extractor_test.go +++ b/internal/executor/param_extractor_test.go @@ -3,6 +3,7 @@ package executor import ( "testing" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/utils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -86,7 +87,7 @@ func TestConvertToString(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := convertToString(tt.value) + got, err := utils.ConvertToString(tt.value) require.NoError(t, err) assert.Equal(t, tt.want, got) }) @@ -114,7 +115,7 @@ func TestConvertToInt64(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := convertToInt64(tt.value) + got, err := utils.ConvertToInt64(tt.value) if tt.wantErr { require.Error(t, err) } else { @@ -146,7 +147,7 @@ func TestConvertToFloat64(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := convertToFloat64(tt.value) + got, err := utils.ConvertToFloat64(tt.value) if tt.wantErr { require.Error(t, err) } else { @@ -190,7 +191,7 @@ func TestConvertToBool(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := convertToBool(tt.value) + got, err := utils.ConvertToBool(tt.value) if tt.wantErr { require.Error(t, err) } else { diff --git a/internal/executor/phase.go b/internal/executor/phase.go new file mode 
100644 index 0000000..e427662 --- /dev/null +++ b/internal/executor/phase.go @@ -0,0 +1,127 @@ +package executor + +import ( + "context" + "time" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" +) + +// Phase defines the interface for execution phases. +// Each phase is responsible for a specific part of the execution pipeline. +type Phase interface { + // Name returns the phase identifier + Name() ExecutionPhase + // Execute runs the phase logic + Execute(ctx context.Context, execCtx *ExecutionContext) error + // ShouldSkip determines if this phase should be skipped based on execution context + ShouldSkip(execCtx *ExecutionContext) (skip bool, reason string) +} + +// PhaseResult contains the outcome of a single phase execution +type PhaseResult struct { + // Phase is the phase identifier + Phase ExecutionPhase + // Status is the execution status (success/failed) + Status ExecutionStatus + // Error contains any error that occurred during execution + Error error + // Skipped indicates if the phase was skipped + Skipped bool + // SkipReason explains why the phase was skipped + SkipReason string + // Duration is how long the phase took to execute + Duration time.Duration +} + +// Pipeline executes phases in sequence +type Pipeline struct { + phases []Phase + log logger.Logger +} + +// NewPipeline creates a new execution pipeline with the given phases +func NewPipeline(log logger.Logger, phases ...Phase) *Pipeline { + return &Pipeline{ + phases: phases, + log: log, + } +} + +// Execute runs all phases in sequence and returns results for each phase. +// The pipeline stops on error for most phases, except post-actions which always run. +func (p *Pipeline) Execute(ctx context.Context, execCtx *ExecutionContext) []PhaseResult { + var results []PhaseResult + var shouldStopExecution bool + + for _, phase := range p.phases { + start := time.Now() + phaseName := phase.Name() + + // Check if phase should be skipped + if skip, reason := phase.ShouldSkip(execCtx); skip { + p.log.Infof(ctx, "Phase %s: SKIPPED - %s", phaseName, reason) + results = append(results, PhaseResult{ + Phase: phaseName, + Status: StatusSuccess, + Skipped: true, + SkipReason: reason, + Duration: time.Since(start), + }) + continue + } + + // Skip execution if a previous phase failed (except post-actions always run) + if shouldStopExecution && phaseName != PhasePostActions { + p.log.Infof(ctx, "Phase %s: SKIPPED - previous phase failed", phaseName) + results = append(results, PhaseResult{ + Phase: phaseName, + Status: StatusSuccess, + Skipped: true, + SkipReason: "previous phase failed", + Duration: time.Since(start), + }) + continue + } + + // Execute the phase + p.log.Infof(ctx, "Phase %s: RUNNING", phaseName) + err := phase.Execute(ctx, execCtx) + duration := time.Since(start) + + result := PhaseResult{ + Phase: phaseName, + Error: err, + Duration: duration, + } + + if err != nil { + result.Status = StatusFailed + p.log.Errorf(logger.WithErrorField(ctx, err), "Phase %s: FAILED", phaseName) + // Mark that execution should stop for subsequent phases (except post-actions) + shouldStopExecution = true + } else { + result.Status = StatusSuccess + p.log.Infof(ctx, "Phase %s: SUCCESS", phaseName) + } + + results = append(results, result) + } + + return results +} + +// GetPhase returns a phase by name, or nil if not found +func (p *Pipeline) GetPhase(name ExecutionPhase) Phase { + for _, phase := range p.phases { + if phase.Name() == name { + return phase + } + } + return nil +} + +// Phases returns all phases in the 
pipeline +func (p *Pipeline) Phases() []Phase { + return p.phases +} diff --git a/internal/executor/phase_param_extraction.go b/internal/executor/phase_param_extraction.go new file mode 100644 index 0000000..9b7eec9 --- /dev/null +++ b/internal/executor/phase_param_extraction.go @@ -0,0 +1,185 @@ +package executor + +import ( + "context" + "fmt" + "strings" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/utils" +) + +// ParamExtractionPhase handles parameter extraction from events and environment +type ParamExtractionPhase struct { + config *config_loader.Config + k8sClient k8s_client.K8sClient + log logger.Logger +} + +// NewParamExtractionPhase creates a new parameter extraction phase +func NewParamExtractionPhase(config *config_loader.Config, k8sClient k8s_client.K8sClient, log logger.Logger) *ParamExtractionPhase { + return &ParamExtractionPhase{ + config: config, + k8sClient: k8sClient, + log: log, + } +} + +// Name returns the phase identifier +func (p *ParamExtractionPhase) Name() ExecutionPhase { + return PhaseParamExtraction +} + +// ShouldSkip determines if this phase should be skipped +func (p *ParamExtractionPhase) ShouldSkip(execCtx *ExecutionContext) (bool, string) { + // Parameter extraction is always executed + return false, "" +} + +// Execute runs parameter extraction logic +func (p *ParamExtractionPhase) Execute(ctx context.Context, execCtx *ExecutionContext) error { + // Extract configured parameters + if err := p.extractConfigParams(execCtx); err != nil { + execCtx.SetError("ParameterExtractionFailed", err.Error()) + return err + } + + // Add metadata params + p.addMetadataParams(execCtx) + + p.log.Debugf(ctx, "Parameter extraction completed: extracted %d params", len(execCtx.Params)) + return nil +} + +// extractConfigParams extracts all configured parameters and populates execCtx.Params +func (p *ParamExtractionPhase) extractConfigParams(execCtx *ExecutionContext) error { + for _, param := range p.config.Spec.Params { + value, err := p.extractParam(execCtx.Ctx, param, execCtx.EventData) + if err != nil { + if param.Required { + return NewExecutorError(PhaseParamExtraction, param.Name, + fmt.Sprintf("failed to extract required parameter '%s' from source '%s'", param.Name, param.Source), err) + } + // Use default for non-required params if extraction fails + if param.Default != nil { + execCtx.Params[param.Name] = param.Default + } + continue + } + + // Apply default if value is nil or (for strings) empty + isEmpty := value == nil + if s, ok := value.(string); ok && s == "" { + isEmpty = true + } + if isEmpty && param.Default != nil { + value = param.Default + } + + // Apply type conversion if specified + if value != nil && param.Type != "" { + converted, convErr := convertParamType(value, param.Type) + if convErr != nil { + if param.Required { + return NewExecutorError(PhaseParamExtraction, param.Name, + fmt.Sprintf("failed to convert parameter '%s' to type '%s'", param.Name, param.Type), convErr) + } + // Use default for non-required params if conversion fails + if param.Default != nil { + execCtx.Params[param.Name] = param.Default + } + continue + } + value = converted + } + + if value != nil { + execCtx.Params[param.Name] = value + } + } + + return nil +} + +// extractParam extracts a single parameter based on its source +func (p 
*ParamExtractionPhase) extractParam(ctx context.Context, param config_loader.Parameter, eventData map[string]interface{}) (interface{}, error) { + source := param.Source + + // Handle different source types + switch { + case strings.HasPrefix(source, "env."): + return extractFromEnv(source[4:]) + case strings.HasPrefix(source, "event."): + return extractFromEvent(source[6:], eventData) + case strings.HasPrefix(source, "secret."): + return p.extractFromSecret(ctx, source[7:]) + case strings.HasPrefix(source, "configmap."): + return p.extractFromConfigMap(ctx, source[10:]) + case source == "": + // No source specified, return default or nil + return param.Default, nil + default: + // Try to extract from event data directly + return extractFromEvent(source, eventData) + } +} + +// extractFromEnv extracts a value from environment variables. +// This delegates to utils.GetEnvOrError. +func extractFromEnv(envVar string) (interface{}, error) { + return utils.GetEnvOrError(envVar) +} + +// extractFromEvent extracts a value from event data using dot notation. +// This delegates to utils.GetNestedValue. +func extractFromEvent(path string, eventData map[string]interface{}) (interface{}, error) { + return utils.GetNestedValue(eventData, path) +} + +// extractFromSecret extracts a value from a Kubernetes Secret +// Format: secret... (namespace is required) +func (p *ParamExtractionPhase) extractFromSecret(ctx context.Context, path string) (interface{}, error) { + if p.k8sClient == nil { + return nil, fmt.Errorf("kubernetes client not configured, cannot extract from secret") + } + + value, err := p.k8sClient.ExtractFromSecret(ctx, path) + if err != nil { + return nil, err + } + + return value, nil +} + +// extractFromConfigMap extracts a value from a Kubernetes ConfigMap +// Format: configmap... (namespace is required) +func (p *ParamExtractionPhase) extractFromConfigMap(ctx context.Context, path string) (interface{}, error) { + if p.k8sClient == nil { + return nil, fmt.Errorf("kubernetes client not configured, cannot extract from configmap") + } + + value, err := p.k8sClient.ExtractFromConfigMap(ctx, path) + if err != nil { + return nil, err + } + + return value, nil +} + +// addMetadataParams adds adapter and event metadata to execCtx.Params +func (p *ParamExtractionPhase) addMetadataParams(execCtx *ExecutionContext) { + // Add metadata from adapter config + execCtx.Params["metadata"] = map[string]interface{}{ + "name": p.config.Metadata.Name, + "labels": p.config.Metadata.Labels, + } +} + +// convertParamType converts a value to the specified type. +// This delegates to utils.ConvertToType. 
+// Supported types: string, int, int64, float, float64, bool +func convertParamType(value interface{}, targetType string) (interface{}, error) { + return utils.ConvertToType(value, targetType) +} diff --git a/internal/executor/post_action_executor.go b/internal/executor/phase_post_actions.go similarity index 57% rename from internal/executor/post_action_executor.go rename to internal/executor/phase_post_actions.go index 85b9638..9638637 100644 --- a/internal/executor/post_action_executor.go +++ b/internal/executor/phase_post_actions.go @@ -9,36 +9,79 @@ import ( "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/criteria" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/utils" ) -// PostActionExecutor executes post-processing actions -type PostActionExecutor struct { +// PostActionsPhase handles post-action execution (API calls, logging, etc.) +type PostActionsPhase struct { apiClient hyperfleet_api.Client + config *config_loader.Config log logger.Logger + // results stores the post-action results for later retrieval + results []PostActionResult } -// newPostActionExecutor creates a new post-action executor -// NOTE: Caller (NewExecutor) is responsible for config validation -func newPostActionExecutor(config *ExecutorConfig) *PostActionExecutor { - return &PostActionExecutor{ - apiClient: config.APIClient, - log: config.Logger, +// NewPostActionsPhase creates a new post-actions phase +func NewPostActionsPhase(apiClient hyperfleet_api.Client, config *config_loader.Config, log logger.Logger) *PostActionsPhase { + return &PostActionsPhase{ + apiClient: apiClient, + config: config, + log: log, } } -// ExecuteAll executes all post-processing actions +// Name returns the phase identifier +func (p *PostActionsPhase) Name() ExecutionPhase { + return PhasePostActions +} + +// ShouldSkip determines if this phase should be skipped +func (p *PostActionsPhase) ShouldSkip(execCtx *ExecutionContext) (bool, string) { + // Post-actions are always executed (even on error) for error reporting + // Only skip if there are no post-actions configured + if p.config.Spec.Post == nil || len(p.config.Spec.Post.PostActions) == 0 { + return true, "no post-actions configured" + } + return false, "" +} + +// Execute runs post-action logic +func (p *PostActionsPhase) Execute(ctx context.Context, execCtx *ExecutionContext) error { + postActionCount := 0 + if p.config.Spec.Post != nil { + postActionCount = len(p.config.Spec.Post.PostActions) + } + p.log.Infof(ctx, "Executing %d post-actions", postActionCount) + + results, err := p.executeAll(ctx, p.config.Spec.Post, execCtx) + p.results = results + + if err != nil { + return fmt.Errorf("post action execution failed: %w", err) + } + + p.log.Infof(ctx, "Successfully executed %d post-actions", len(results)) + return nil +} + +// Results returns the post-action results +func (p *PostActionsPhase) Results() []PostActionResult { + return p.results +} + +// executeAll executes all post-processing actions // First builds payloads from post.payloads, then executes post.postActions -func (pae *PostActionExecutor) ExecuteAll(ctx context.Context, postConfig *config_loader.PostConfig, execCtx *ExecutionContext) ([]PostActionResult, error) { +func (p *PostActionsPhase) executeAll(ctx context.Context, postConfig *config_loader.PostConfig, execCtx *ExecutionContext) ([]PostActionResult, error) { if postConfig == nil { return 
[]PostActionResult{}, nil } // Step 1: Build post payloads (like clusterStatusPayload) if len(postConfig.Payloads) > 0 { - pae.log.Infof(ctx, "Building %d post payloads", len(postConfig.Payloads)) - if err := pae.buildPostPayloads(ctx, postConfig.Payloads, execCtx); err != nil { + p.log.Infof(ctx, "Building %d post payloads", len(postConfig.Payloads)) + if err := p.buildPostPayloads(ctx, postConfig.Payloads, execCtx); err != nil { errCtx := logger.WithErrorField(ctx, err) - pae.log.Errorf(errCtx, "Failed to build post payloads") + p.log.Errorf(errCtx, "Failed to build post payloads") execCtx.Adapter.ExecutionError = &ExecutionError{ Phase: string(PhasePostActions), Step: "build_payloads", @@ -47,19 +90,19 @@ func (pae *PostActionExecutor) ExecuteAll(ctx context.Context, postConfig *confi return []PostActionResult{}, NewExecutorError(PhasePostActions, "build_payloads", "failed to build post payloads", err) } for _, payload := range postConfig.Payloads { - pae.log.Debugf(ctx, "payload[%s] built successfully", payload.Name) + p.log.Debugf(ctx, "payload[%s] built successfully", payload.Name) } } // Step 2: Execute post actions (sequential - stop on first failure) results := make([]PostActionResult, 0, len(postConfig.PostActions)) for _, action := range postConfig.PostActions { - result, err := pae.executePostAction(ctx, action, execCtx) + result, err := p.executePostAction(ctx, action, execCtx) results = append(results, result) if err != nil { errCtx := logger.WithErrorField(ctx, err) - pae.log.Errorf(errCtx, "PostAction[%s] processed: FAILED", action.Name) + p.log.Errorf(errCtx, "PostAction[%s] processed: FAILED", action.Name) // Set ExecutionError for failed post action execCtx.Adapter.ExecutionError = &ExecutionError{ @@ -71,7 +114,7 @@ func (pae *PostActionExecutor) ExecuteAll(ctx context.Context, postConfig *confi // Stop execution - don't run remaining post actions return results, err } - pae.log.Infof(ctx, "PostAction[%s] processed: SUCCESS - status=%s", action.Name, result.Status) + p.log.Infof(ctx, "PostAction[%s] processed: SUCCESS - status=%s", action.Name, result.Status) } return results, nil @@ -79,12 +122,12 @@ func (pae *PostActionExecutor) ExecuteAll(ctx context.Context, postConfig *confi // buildPostPayloads builds all post payloads and stores them in execCtx.Params // Payloads are complex structures built from CEL expressions and templates -func (pae *PostActionExecutor) buildPostPayloads(ctx context.Context, payloads []config_loader.Payload, execCtx *ExecutionContext) error { +func (p *PostActionsPhase) buildPostPayloads(ctx context.Context, payloads []config_loader.Payload, execCtx *ExecutionContext) error { // Create evaluation context with all CEL variables (params, adapter, resources) evalCtx := criteria.NewEvaluationContext() evalCtx.SetVariablesFromMap(execCtx.GetCELVariables()) - evaluator, err := criteria.NewEvaluator(ctx, evalCtx, pae.log) + evaluator, err := criteria.NewEvaluator(ctx, evalCtx, p.log) if err != nil { return fmt.Errorf("failed to create evaluator: %w", err) } @@ -101,7 +144,7 @@ func (pae *PostActionExecutor) buildPostPayloads(ctx context.Context, payloads [ } // Build the payload - builtPayload, err := pae.buildPayload(ctx, buildDef, evaluator, execCtx.Params) + builtPayload, err := p.buildPayload(ctx, buildDef, evaluator, execCtx.Params) if err != nil { return fmt.Errorf("failed to build payload '%s': %w", payload.Name, err) } @@ -121,20 +164,20 @@ func (pae *PostActionExecutor) buildPostPayloads(ctx context.Context, payloads [ // buildPayload 
builds a payload from a build definition // The build definition can contain expressions that need to be evaluated -func (pae *PostActionExecutor) buildPayload(ctx context.Context, build any, evaluator *criteria.Evaluator, params map[string]any) (any, error) { +func (p *PostActionsPhase) buildPayload(ctx context.Context, build any, evaluator *criteria.Evaluator, params map[string]any) (any, error) { switch v := build.(type) { case map[string]any: - return pae.buildMapPayload(ctx, v, evaluator, params) + return p.buildMapPayload(ctx, v, evaluator, params) case map[any]any: - converted := convertToStringKeyMap(v) - return pae.buildMapPayload(ctx, converted, evaluator, params) + converted := utils.ConvertToStringKeyMap(v) + return p.buildMapPayload(ctx, converted, evaluator, params) default: return build, nil } } // buildMapPayload builds a map payload, evaluating expressions as needed -func (pae *PostActionExecutor) buildMapPayload(ctx context.Context, m map[string]any, evaluator *criteria.Evaluator, params map[string]any) (map[string]any, error) { +func (p *PostActionsPhase) buildMapPayload(ctx context.Context, m map[string]any, evaluator *criteria.Evaluator, params map[string]any) (map[string]any, error) { result := make(map[string]any) for k, v := range m { @@ -145,7 +188,7 @@ func (pae *PostActionExecutor) buildMapPayload(ctx context.Context, m map[string } // Process the value - processedValue, err := pae.processValue(ctx, v, evaluator, params) + processedValue, err := p.processValue(ctx, v, evaluator, params) if err != nil { return nil, fmt.Errorf("failed to process value for key '%s': %w", k, err) } @@ -157,7 +200,7 @@ func (pae *PostActionExecutor) buildMapPayload(ctx context.Context, m map[string } // processValue processes a value, evaluating expressions as needed -func (pae *PostActionExecutor) processValue(ctx context.Context, v any, evaluator *criteria.Evaluator, params map[string]any) (any, error) { +func (p *PostActionsPhase) processValue(ctx context.Context, v any, evaluator *criteria.Evaluator, params map[string]any) (any, error) { switch val := v.(type) { case map[string]any: // Check if this is a value definition: { field: "...", default: ... } or { expression: "...", default: ... 
} @@ -170,7 +213,7 @@ func (pae *PostActionExecutor) processValue(ctx context.Context, v any, evaluato // If value is nil (field not found or empty), use default if result.Value == nil { if valueDef.Default != nil { - pae.log.Debugf(ctx, "Using default value for '%s': %v", result.Source, valueDef.Default) + p.log.Debugf(ctx, "Using default value for '%s': %v", result.Source, valueDef.Default) } return valueDef.Default, nil } @@ -178,16 +221,16 @@ func (pae *PostActionExecutor) processValue(ctx context.Context, v any, evaluato } // Recursively process nested maps - return pae.buildMapPayload(ctx, val, evaluator, params) + return p.buildMapPayload(ctx, val, evaluator, params) case map[any]any: - converted := convertToStringKeyMap(val) - return pae.processValue(ctx, converted, evaluator, params) + converted := utils.ConvertToStringKeyMap(val) + return p.processValue(ctx, converted, evaluator, params) case []any: result := make([]any, len(val)) for i, item := range val { - processed, err := pae.processValue(ctx, item, evaluator, params) + processed, err := p.processValue(ctx, item, evaluator, params) if err != nil { return nil, err } @@ -204,7 +247,7 @@ func (pae *PostActionExecutor) processValue(ctx context.Context, v any, evaluato } // executePostAction executes a single post-action -func (pae *PostActionExecutor) executePostAction(ctx context.Context, action config_loader.PostAction, execCtx *ExecutionContext) (PostActionResult, error) { +func (p *PostActionsPhase) executePostAction(ctx context.Context, action config_loader.PostAction, execCtx *ExecutionContext) (PostActionResult, error) { result := PostActionResult{ Name: action.Name, Status: StatusSuccess, @@ -212,12 +255,12 @@ func (pae *PostActionExecutor) executePostAction(ctx context.Context, action con // Execute log action if configured if action.Log != nil { - ExecuteLogAction(ctx, action.Log, execCtx, pae.log) + ExecuteLogAction(ctx, action.Log, execCtx, p.log) } // Execute API call if configured if action.APICall != nil { - if err := pae.executeAPICall(ctx, action.APICall, execCtx, &result); err != nil { + if err := p.executeAPICall(ctx, action.APICall, execCtx, &result); err != nil { return result, err } } @@ -226,8 +269,8 @@ func (pae *PostActionExecutor) executePostAction(ctx context.Context, action con } // executeAPICall executes an API call and populates the result with response details -func (pae *PostActionExecutor) executeAPICall(ctx context.Context, apiCall *config_loader.APICall, execCtx *ExecutionContext, result *PostActionResult) error { - resp, url, err := ExecuteAPICall(ctx, apiCall, execCtx, pae.apiClient, pae.log) +func (p *PostActionsPhase) executeAPICall(ctx context.Context, apiCall *config_loader.APICall, execCtx *ExecutionContext, result *PostActionResult) error { + resp, url, err := ExecuteAPICall(ctx, apiCall, execCtx, p.apiClient, p.log) result.APICallMade = true // Capture response details if available (even if err != nil) diff --git a/internal/executor/precondition_executor.go b/internal/executor/phase_preconditions.go similarity index 60% rename from internal/executor/precondition_executor.go rename to internal/executor/phase_preconditions.go index cd55527..31f7a79 100644 --- a/internal/executor/precondition_executor.go +++ b/internal/executor/phase_preconditions.go @@ -12,34 +12,93 @@ import ( "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" ) -// PreconditionExecutor evaluates preconditions -type PreconditionExecutor struct { +// PreconditionsPhase handles precondition evaluation +type 
PreconditionsPhase struct { apiClient hyperfleet_api.Client + config *config_loader.Config log logger.Logger + // results stores the precondition results for later retrieval + results []PreconditionResult + // outcome stores the overall outcome for later retrieval + outcome *PreconditionsOutcome } -// newPreconditionExecutor creates a new precondition executor -// NOTE: Caller (NewExecutor) is responsible for config validation -func newPreconditionExecutor(config *ExecutorConfig) *PreconditionExecutor { - return &PreconditionExecutor{ - apiClient: config.APIClient, - log: config.Logger, +// NewPreconditionsPhase creates a new preconditions phase +func NewPreconditionsPhase(apiClient hyperfleet_api.Client, config *config_loader.Config, log logger.Logger) *PreconditionsPhase { + return &PreconditionsPhase{ + apiClient: apiClient, + config: config, + log: log, } } -// ExecuteAll executes all preconditions in sequence +// Name returns the phase identifier +func (p *PreconditionsPhase) Name() ExecutionPhase { + return PhasePreconditions +} + +// ShouldSkip determines if this phase should be skipped +func (p *PreconditionsPhase) ShouldSkip(execCtx *ExecutionContext) (bool, string) { + if len(p.config.Spec.Preconditions) == 0 { + return true, "no preconditions configured" + } + return false, "" +} + +// Execute runs precondition evaluation logic +func (p *PreconditionsPhase) Execute(ctx context.Context, execCtx *ExecutionContext) error { + p.log.Infof(ctx, "Evaluating %d preconditions", len(p.config.Spec.Preconditions)) + + outcome := p.executeAll(ctx, p.config.Spec.Preconditions, execCtx) + p.results = outcome.Results + p.outcome = &PreconditionsOutcome{ + AllMatched: outcome.AllMatched, + Results: outcome.Results, + Error: outcome.Error, + NotMetReason: outcome.NotMetReason, + } + + if outcome.Error != nil { + // Process execution error: precondition evaluation failed + execCtx.SetError("PreconditionFailed", outcome.Error.Error()) + execCtx.SetSkipped("PreconditionFailed", outcome.Error.Error()) + return fmt.Errorf("precondition evaluation failed: %w", outcome.Error) + } + + if !outcome.AllMatched { + // Business outcome: precondition not satisfied (not an error) + execCtx.SetSkipped("PreconditionNotMet", outcome.NotMetReason) + p.log.Infof(ctx, "Preconditions not met: %s", outcome.NotMetReason) + } else { + p.log.Infof(ctx, "All %d preconditions passed", len(outcome.Results)) + } + + return nil +} + +// Results returns the precondition evaluation results +func (p *PreconditionsPhase) Results() []PreconditionResult { + return p.results +} + +// Outcome returns the overall preconditions outcome +func (p *PreconditionsPhase) Outcome() *PreconditionsOutcome { + return p.outcome +} + +// executeAll executes all preconditions in sequence // Returns a high-level outcome with match status and individual results -func (pe *PreconditionExecutor) ExecuteAll(ctx context.Context, preconditions []config_loader.Precondition, execCtx *ExecutionContext) *PreconditionsOutcome { +func (p *PreconditionsPhase) executeAll(ctx context.Context, preconditions []config_loader.Precondition, execCtx *ExecutionContext) *PreconditionsOutcome { results := make([]PreconditionResult, 0, len(preconditions)) for _, precond := range preconditions { - result, err := pe.executePrecondition(ctx, precond, execCtx) + result, err := p.executePrecondition(ctx, precond, execCtx) results = append(results, result) if err != nil { // Execution error (API call failed, parse error, etc.) 
errCtx := logger.WithErrorField(ctx, err) - pe.log.Errorf(errCtx, "Precondition[%s] evaluated: FAILED", precond.Name) + p.log.Errorf(errCtx, "Precondition[%s] evaluated: FAILED", precond.Name) return &PreconditionsOutcome{ AllMatched: false, Results: results, @@ -49,7 +108,7 @@ func (pe *PreconditionExecutor) ExecuteAll(ctx context.Context, preconditions [] if !result.Matched { // Business outcome: precondition not satisfied - pe.log.Infof(ctx, "Precondition[%s] evaluated: NOT_MET - %s", precond.Name, formatConditionDetails(result)) + p.log.Infof(ctx, "Precondition[%s] evaluated: NOT_MET - %s", precond.Name, formatConditionDetails(result)) return &PreconditionsOutcome{ AllMatched: false, Results: results, @@ -58,7 +117,7 @@ func (pe *PreconditionExecutor) ExecuteAll(ctx context.Context, preconditions [] } } - pe.log.Infof(ctx, "Precondition[%s] evaluated: MET", precond.Name) + p.log.Infof(ctx, "Precondition[%s] evaluated: MET", precond.Name) } // All preconditions matched @@ -70,7 +129,7 @@ func (pe *PreconditionExecutor) ExecuteAll(ctx context.Context, preconditions [] } // executePrecondition executes a single precondition -func (pe *PreconditionExecutor) executePrecondition(ctx context.Context, precond config_loader.Precondition, execCtx *ExecutionContext) (PreconditionResult, error) { +func (p *PreconditionsPhase) executePrecondition(ctx context.Context, precond config_loader.Precondition, execCtx *ExecutionContext) (PreconditionResult, error) { result := PreconditionResult{ Name: precond.Name, Status: StatusSuccess, @@ -79,12 +138,12 @@ func (pe *PreconditionExecutor) executePrecondition(ctx context.Context, precond // Step 1: Execute log action if configured if precond.Log != nil { - ExecuteLogAction(ctx, precond.Log, execCtx, pe.log) + ExecuteLogAction(ctx, precond.Log, execCtx, p.log) } // Step 2: Make API call if configured if precond.APICall != nil { - apiResult, err := pe.executeAPICall(ctx, precond.APICall, execCtx) + apiResult, err := p.executeAPICall(ctx, precond.APICall, execCtx) if err != nil { result.Status = StatusFailed result.Error = err @@ -123,16 +182,16 @@ func (pe *PreconditionExecutor) executePrecondition(ctx context.Context, precond // Capture fields from response if len(precond.Capture) > 0 { - pe.log.Debugf(ctx, "Capturing %d fields from API response", len(precond.Capture)) + p.log.Debugf(ctx, "Capturing %d fields from API response", len(precond.Capture)) // Create evaluator with response data only // Both field (JSONPath) and expression (CEL) work on the same source captureCtx := criteria.NewEvaluationContext() captureCtx.SetVariablesFromMap(responseData) - captureEvaluator, evalErr := criteria.NewEvaluator(ctx, captureCtx, pe.log) + captureEvaluator, evalErr := criteria.NewEvaluator(ctx, captureCtx, p.log) if evalErr != nil { - pe.log.Warnf(ctx, "Failed to create capture evaluator: %v", evalErr) + p.log.Warnf(ctx, "Failed to create capture evaluator: %v", evalErr) } else { for _, capture := range precond.Capture { extractResult, err := captureEvaluator.ExtractValue(capture.Field, capture.Expression) @@ -141,12 +200,12 @@ func (pe *PreconditionExecutor) executePrecondition(ctx context.Context, precond } // Error is not nil when there is field missing that is not a bug, but a valid use case if extractResult.Error != nil { - pe.log.Warnf(ctx, "Failed to capture '%s' with error: %v", capture.Name, extractResult.Error) + p.log.Warnf(ctx, "Failed to capture '%s' with error: %v", capture.Name, extractResult.Error) continue } result.CapturedFields[capture.Name] = 
extractResult.Value execCtx.Params[capture.Name] = extractResult.Value - pe.log.Debugf(ctx, "Captured %s = %v (from %s)", capture.Name, extractResult.Value, extractResult.Source) + p.log.Debugf(ctx, "Captured %s = %v (from %s)", capture.Name, extractResult.Value, extractResult.Source) } } } @@ -158,7 +217,7 @@ func (pe *PreconditionExecutor) executePrecondition(ctx context.Context, precond evalCtx := criteria.NewEvaluationContext() evalCtx.SetVariablesFromMap(execCtx.GetCELVariables()) - evaluator, err := criteria.NewEvaluator(ctx, evalCtx, pe.log) + evaluator, err := criteria.NewEvaluator(ctx, evalCtx, p.log) if err != nil { result.Status = StatusFailed result.Error = err @@ -167,7 +226,7 @@ func (pe *PreconditionExecutor) executePrecondition(ctx context.Context, precond // Evaluate using structured conditions or CEL expression if len(precond.Conditions) > 0 { - pe.log.Debugf(ctx, "Evaluating %d structured conditions", len(precond.Conditions)) + p.log.Debugf(ctx, "Evaluating %d structured conditions", len(precond.Conditions)) condDefs := ToConditionDefs(precond.Conditions) condResult, err := evaluator.EvaluateConditions(condDefs) @@ -183,9 +242,9 @@ func (pe *PreconditionExecutor) executePrecondition(ctx context.Context, precond // Log individual condition results for _, cr := range condResult.Results { if cr.Matched { - pe.log.Debugf(ctx, "Condition: %s %s %v = %v (matched)", cr.Field, cr.Operator, cr.ExpectedValue, cr.FieldValue) + p.log.Debugf(ctx, "Condition: %s %s %v = %v (matched)", cr.Field, cr.Operator, cr.ExpectedValue, cr.FieldValue) } else { - pe.log.Debugf(ctx, "Condition: %s %s %v = %v (not matched)", cr.Field, cr.Operator, cr.ExpectedValue, cr.FieldValue) + p.log.Debugf(ctx, "Condition: %s %s %v = %v (not matched)", cr.Field, cr.Operator, cr.ExpectedValue, cr.FieldValue) } } @@ -197,7 +256,7 @@ func (pe *PreconditionExecutor) executePrecondition(ctx context.Context, precond execCtx.AddConditionsEvaluation(PhasePreconditions, precond.Name, condResult.Matched, fieldResults) } else if precond.Expression != "" { // Evaluate CEL expression - pe.log.Debugf(ctx, "Evaluating CEL expression: %s", strings.TrimSpace(precond.Expression)) + p.log.Debugf(ctx, "Evaluating CEL expression: %s", strings.TrimSpace(precond.Expression)) celResult, err := evaluator.EvaluateCEL(strings.TrimSpace(precond.Expression)) if err != nil { result.Status = StatusFailed @@ -207,13 +266,13 @@ func (pe *PreconditionExecutor) executePrecondition(ctx context.Context, precond result.Matched = celResult.Matched result.CELResult = celResult - pe.log.Debugf(ctx, "CEL result: matched=%v value=%v", celResult.Matched, celResult.Value) + p.log.Debugf(ctx, "CEL result: matched=%v value=%v", celResult.Matched, celResult.Value) // Record CEL evaluation in execution context execCtx.AddCELEvaluation(PhasePreconditions, precond.Name, precond.Expression, celResult.Matched) } else { // No conditions specified - consider it matched - pe.log.Debugf(ctx, "No conditions specified, auto-matched") + p.log.Debugf(ctx, "No conditions specified, auto-matched") result.Matched = true } @@ -221,8 +280,8 @@ func (pe *PreconditionExecutor) executePrecondition(ctx context.Context, precond } // executeAPICall executes an API call and returns the response body for field capture -func (pe *PreconditionExecutor) executeAPICall(ctx context.Context, apiCall *config_loader.APICall, execCtx *ExecutionContext) ([]byte, error) { - resp, url, err := ExecuteAPICall(ctx, apiCall, execCtx, pe.apiClient, pe.log) +func (p *PreconditionsPhase) 
executeAPICall(ctx context.Context, apiCall *config_loader.APICall, execCtx *ExecutionContext) ([]byte, error) { + resp, url, err := ExecuteAPICall(ctx, apiCall, execCtx, p.apiClient, p.log) // Validate response - returns APIError with full metadata if validation fails if validationErr := ValidateAPIResponse(resp, err, apiCall.Method, url); validationErr != nil { diff --git a/internal/executor/phase_resources.go b/internal/executor/phase_resources.go new file mode 100644 index 0000000..83e09ee --- /dev/null +++ b/internal/executor/phase_resources.go @@ -0,0 +1,364 @@ +package executor + +import ( + "context" + "fmt" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/manifest" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/resource_applier" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/utils" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// ResourcesPhase handles Kubernetes resource creation/update. +// It uses the ResourceApplier interface to support multiple backends: +// - Direct Kubernetes API (k8s_client) +// - Maestro/OCM ManifestWork (maestro_client) +type ResourcesPhase struct { + applier resource_applier.ResourceApplier + config *config_loader.Config + log logger.Logger + // results stores the resource operation results for later retrieval + results []ResourceResult +} + +// NewResourcesPhase creates a new resources phase. +// The applier parameter can be any implementation of ResourceApplier: +// - k8s_client.Client for direct Kubernetes API access +// - maestro_client.ResourceApplier for ManifestWork-based deployments +func NewResourcesPhase(applier resource_applier.ResourceApplier, config *config_loader.Config, log logger.Logger) *ResourcesPhase { + return &ResourcesPhase{ + applier: applier, + config: config, + log: log, + } +} + +// Name returns the phase identifier +func (p *ResourcesPhase) Name() ExecutionPhase { + return PhaseResources +} + +// ShouldSkip determines if this phase should be skipped +func (p *ResourcesPhase) ShouldSkip(execCtx *ExecutionContext) (bool, string) { + // Skip if no resources configured + if len(p.config.Spec.Resources) == 0 { + return true, "no resources configured" + } + + // Skip if resources were marked to be skipped (e.g., precondition not met) + if execCtx.Adapter.ResourcesSkipped { + return true, execCtx.Adapter.SkipReason + } + + return false, "" +} + +// Execute runs resource creation/update logic +func (p *ResourcesPhase) Execute(ctx context.Context, execCtx *ExecutionContext) error { + p.log.Infof(ctx, "Processing %d resources", len(p.config.Spec.Resources)) + + results, err := p.executeAll(ctx, p.config.Spec.Resources, execCtx) + p.results = results + + if err != nil { + execCtx.SetError("ResourceFailed", err.Error()) + return fmt.Errorf("resource execution failed: %w", err) + } + + p.log.Infof(ctx, "Successfully processed %d resources", len(results)) + return nil +} + +// Results returns the resource operation results +func (p *ResourcesPhase) Results() []ResourceResult { + return p.results +} + +// executeAll creates/updates all resources using the ResourceApplier. +// It prepares all resources first (build manifests, discover existing), then applies them in batch. +// Returns results for each resource and updates the execution context. 
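For orientation, the applier contract that ResourcesPhase relies on can be sketched from the calls made in this file (ApplyResources, GetResource, DiscoverResources). The real interface and its result types live in internal/resource_applier and internal/manifest, so everything below is a simplified stand-in rather than the actual declarations:

```go
// Sketch only: an inferred shape for the applier used by ResourcesPhase.
// Field and method sets are assumptions based on the calls in this patch;
// the authoritative definitions live in internal/resource_applier and internal/manifest.
package appliersketch

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// DiscoveryConfig narrows a lookup to a namespace and label selector.
type DiscoveryConfig struct {
	Namespace     string
	LabelSelector string
}

// ApplyOptions carries per-resource behaviour such as recreateOnChange.
type ApplyOptions struct {
	RecreateOnChange bool
}

// ResourceToApply pairs a rendered manifest with its discovered existing object.
type ResourceToApply struct {
	Name     string
	Manifest *unstructured.Unstructured
	Existing *unstructured.Unstructured
	Options  *ApplyOptions
}

// ResourceApplyResult reports the outcome for one resource in a batch.
type ResourceApplyResult struct {
	Name  string
	Error error
}

// ApplyResourcesResult aggregates a batch apply.
type ApplyResourcesResult struct {
	Results      []*ResourceApplyResult
	SuccessCount int
	FailedCount  int
}

// ResourceApplier is the backend-agnostic surface ResourcesPhase depends on.
// Both a direct Kubernetes client and a Maestro/ManifestWork client can satisfy it.
type ResourceApplier interface {
	ApplyResources(ctx context.Context, resources []ResourceToApply) (*ApplyResourcesResult, error)
	GetResource(ctx context.Context, gvk schema.GroupVersionKind, namespace, name string) (*unstructured.Unstructured, error)
	DiscoverResources(ctx context.Context, gvk schema.GroupVersionKind, cfg *DiscoveryConfig) (*unstructured.UnstructuredList, error)
}
```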
+func (p *ResourcesPhase) executeAll(ctx context.Context, resources []config_loader.Resource, execCtx *ExecutionContext) ([]ResourceResult, error) { + if execCtx.Resources == nil { + execCtx.Resources = make(map[string]*unstructured.Unstructured) + } + + if p.applier == nil { + return nil, fmt.Errorf("resource applier not configured") + } + + // Phase 1: Prepare all resources (build manifests, discover existing) + p.log.Infof(ctx, "Preparing %d resources...", len(resources)) + resourcesToApply, prepResults, err := p.prepareResources(ctx, resources, execCtx) + if err != nil { + return prepResults, err + } + + // Phase 2: Apply all resources using the ResourceApplier + p.log.Infof(ctx, "Applying %d resources...", len(resourcesToApply)) + applyResults, err := p.applier.ApplyResources(ctx, resourcesToApply) + if err != nil { + // Convert apply results to ResourceResult and set error context + results := p.convertApplyResults(ctx, applyResults, prepResults, execCtx) + return results, err + } + + // Convert successful results + results := p.convertApplyResults(ctx, applyResults, prepResults, execCtx) + return results, nil +} + +// prepareResources builds manifests and discovers existing resources for all resources. +// Returns the prepared ResourceToApply list and partial results if any preparation fails. +func (p *ResourcesPhase) prepareResources(ctx context.Context, resources []config_loader.Resource, execCtx *ExecutionContext) ([]resource_applier.ResourceToApply, []ResourceResult, error) { + resourcesToApply := make([]resource_applier.ResourceToApply, 0, len(resources)) + results := make([]ResourceResult, 0, len(resources)) + + for _, resource := range resources { + result := ResourceResult{ + Name: resource.Name, + Status: StatusSuccess, + } + + // Step 1: Build the manifest + p.log.Debugf(ctx, "Building manifest for resource[%s]...", resource.Name) + obj, err := p.buildManifest(ctx, resource, execCtx) + if err != nil { + result.Status = StatusFailed + result.Error = err + results = append(results, result) + return resourcesToApply, results, NewExecutorError(PhaseResources, resource.Name, "failed to build manifest", err) + } + + // Extract resource info + gvk := obj.GroupVersionKind() + result.Kind = gvk.Kind + result.Namespace = obj.GetNamespace() + result.ResourceName = obj.GetName() + + p.log.Debugf(ctx, "Resource[%s] manifest built: %s/%s in namespace %s", + resource.Name, gvk.Kind, obj.GetName(), obj.GetNamespace()) + + // Step 2: Discover existing resource + existingResource, err := p.discoverExisting(ctx, resource, obj, execCtx) + if err != nil { + result.Status = StatusFailed + result.Error = err + results = append(results, result) + return resourcesToApply, results, NewExecutorError(PhaseResources, resource.Name, "failed to discover existing resource", err) + } + + if existingResource != nil { + p.log.Debugf(ctx, "Resource[%s] existing found: %s/%s", + resource.Name, existingResource.GetNamespace(), existingResource.GetName()) + } else { + p.log.Debugf(ctx, "Resource[%s] no existing found, will create", resource.Name) + } + + // Add to batch + resourcesToApply = append(resourcesToApply, resource_applier.ResourceToApply{ + Name: resource.Name, + Manifest: obj, + Existing: existingResource, + Options: &resource_applier.ApplyOptions{ + RecreateOnChange: resource.RecreateOnChange, + }, + }) + results = append(results, result) + } + + return resourcesToApply, results, nil +} + +// discoverExisting discovers the existing resource using Discovery config or by name. 
+func (p *ResourcesPhase) discoverExisting(ctx context.Context, resource config_loader.Resource, obj *unstructured.Unstructured, execCtx *ExecutionContext) (*unstructured.Unstructured, error) { + gvk := obj.GroupVersionKind() + + var existingResource *unstructured.Unstructured + var err error + + if resource.Discovery != nil { + // Use Discovery config to find existing resource (e.g., by label selector) + existingResource, err = p.discoverExistingResource(ctx, gvk, resource.Discovery, execCtx) + } else { + // No Discovery config - lookup by name from manifest + existingResource, err = p.applier.GetResource(ctx, gvk, obj.GetNamespace(), obj.GetName()) + } + + // NotFound is not an error - it means the resource doesn't exist yet + if err != nil && !apierrors.IsNotFound(err) { + return nil, err + } + + return existingResource, nil +} + +// convertApplyResults converts resource_applier.ApplyResourcesResult to []ResourceResult. +// It also updates the execution context with the resulting resources. +func (p *ResourcesPhase) convertApplyResults(ctx context.Context, applyResults *resource_applier.ApplyResourcesResult, prepResults []ResourceResult, execCtx *ExecutionContext) []ResourceResult { + results := make([]ResourceResult, len(prepResults)) + copy(results, prepResults) + + for i, applyResult := range applyResults.Results { + if i >= len(results) { + break + } + + // Update result with apply outcome + if applyResult.Error != nil { + results[i].Status = StatusFailed + results[i].Error = applyResult.Error + // Set ExecutionError for the first failure + execCtx.Adapter.ExecutionError = &ExecutionError{ + Phase: string(PhaseResources), + Step: applyResult.Name, + Message: applyResult.Error.Error(), + } + errCtx := logger.WithK8sResult(ctx, "FAILED") + errCtx = logger.WithErrorField(errCtx, applyResult.Error) + p.log.Errorf(errCtx, "Resource[%s] processed: FAILED", applyResult.Name) + } else if applyResult.ApplyResult != nil { + results[i].Operation = applyResult.Operation + results[i].OperationReason = applyResult.Reason + results[i].Resource = applyResult.Resource + + // Store resource in execution context + if results[i].Resource != nil { + execCtx.Resources[applyResult.Name] = results[i].Resource + } + + successCtx := logger.WithK8sResult(ctx, "SUCCESS") + p.log.Infof(successCtx, "Resource[%s] processed: operation=%s reason=%s", + applyResult.Name, results[i].Operation, results[i].OperationReason) + } + } + + return results +} + +// buildManifest builds an unstructured manifest from the resource configuration +func (p *ResourcesPhase) buildManifest(ctx context.Context, resource config_loader.Resource, execCtx *ExecutionContext) (*unstructured.Unstructured, error) { + var manifestData map[string]interface{} + + // Get manifest (inline or loaded from ref) + if resource.Manifest != nil { + switch m := resource.Manifest.(type) { + case map[string]interface{}: + manifestData = m + case map[interface{}]interface{}: + manifestData = utils.ConvertToStringKeyMap(m) + default: + return nil, fmt.Errorf("unsupported manifest type: %T", resource.Manifest) + } + } else { + return nil, fmt.Errorf("no manifest specified for resource %s", resource.Name) + } + + // Deep copy to avoid modifying the original + copiedData, err := utils.DeepCopyMap(manifestData) + if err != nil { + p.log.Warnf(ctx, "Failed to deep copy manifest data: %v. 
Using shallow copy.", err) + copiedData = utils.DeepCopyMapWithFallback(manifestData) + } + + // Render all template strings in the manifest + renderedData, err := manifest.RenderManifestData(copiedData, execCtx.Params) + if err != nil { + return nil, fmt.Errorf("failed to render manifest templates: %w", err) + } + + // Convert to unstructured + obj := &unstructured.Unstructured{Object: renderedData} + + // Validate manifest (K8s fields + generation annotation) + if err := manifest.ValidateManifest(obj); err != nil { + return nil, err + } + + return obj, nil +} + +// discoverExistingResource discovers an existing resource using the discovery config +func (p *ResourcesPhase) discoverExistingResource(ctx context.Context, gvk schema.GroupVersionKind, discovery *config_loader.DiscoveryConfig, execCtx *ExecutionContext) (*unstructured.Unstructured, error) { + if p.applier == nil { + return nil, fmt.Errorf("resource applier not configured") + } + + // Render discovery namespace template + // Empty namespace means all namespaces (normalized from "*" at config load time) + namespace, err := utils.RenderTemplate(discovery.Namespace, execCtx.Params) + if err != nil { + return nil, fmt.Errorf("failed to render namespace template: %w", err) + } + + // Check if discovering by name + if discovery.ByName != "" { + name, err := utils.RenderTemplate(discovery.ByName, execCtx.Params) + if err != nil { + return nil, fmt.Errorf("failed to render byName template: %w", err) + } + return p.applier.GetResource(ctx, gvk, namespace, name) + } + + // Discover by label selector + if discovery.BySelectors != nil && len(discovery.BySelectors.LabelSelector) > 0 { + // Render label selector templates + renderedLabels := make(map[string]string) + for k, v := range discovery.BySelectors.LabelSelector { + renderedK, err := utils.RenderTemplate(k, execCtx.Params) + if err != nil { + return nil, fmt.Errorf("failed to render label key template: %w", err) + } + renderedV, err := utils.RenderTemplate(v, execCtx.Params) + if err != nil { + return nil, fmt.Errorf("failed to render label value template: %w", err) + } + renderedLabels[renderedK] = renderedV + } + + labelSelector := manifest.BuildLabelSelector(renderedLabels) + + discoveryConfig := &manifest.DiscoveryConfig{ + Namespace: namespace, + LabelSelector: labelSelector, + } + + list, err := p.applier.DiscoverResources(ctx, gvk, discoveryConfig) + if err != nil { + return nil, err + } + + if len(list.Items) == 0 { + return nil, apierrors.NewNotFound(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, "") + } + + return manifest.GetLatestGenerationFromList(list), nil + } + + return nil, fmt.Errorf("discovery config must specify byName or bySelectors") +} + +// GetResourceAsMap converts an unstructured resource to a map for CEL evaluation +func GetResourceAsMap(resource *unstructured.Unstructured) map[string]interface{} { + if resource == nil { + return nil + } + return resource.Object +} + +// BuildResourcesMap builds a map of all resources for CEL evaluation. +// Resource names are used directly as keys (snake_case and camelCase both work in CEL). +// Name validation (no hyphens, no duplicates) is done at config load time. 
+func BuildResourcesMap(resources map[string]*unstructured.Unstructured) map[string]interface{} { + result := make(map[string]interface{}) + for name, resource := range resources { + if resource != nil { + result[name] = resource.Object + } + } + return result +} diff --git a/internal/executor/post_action_executor_test.go b/internal/executor/post_action_executor_test.go index 494d718..f0e037f 100644 --- a/internal/executor/post_action_executor_test.go +++ b/internal/executor/post_action_executor_test.go @@ -5,7 +5,6 @@ import ( "net/http" "testing" - "github.com/cloudevents/sdk-go/v2/event" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/criteria" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" @@ -14,16 +13,17 @@ import ( "github.com/stretchr/testify/require" ) -// testPAE creates a PostActionExecutor for tests -func testPAE() *PostActionExecutor { - return newPostActionExecutor(&ExecutorConfig{ - Logger: logger.NewTestLogger(), - APIClient: newMockAPIClient(), - }) +// testPostActionsPhase creates a PostActionsPhase for tests +func testPostActionsPhase() *PostActionsPhase { + return NewPostActionsPhase( + newMockAPIClient(), + &config_loader.Config{}, + logger.NewTestLogger(), + ) } func TestBuildPayload(t *testing.T) { - pae := testPAE() + phase := testPostActionsPhase() tests := []struct { name string @@ -95,10 +95,10 @@ func TestBuildPayload(t *testing.T) { for k, v := range tt.params { evalCtx.Set(k, v) } - evaluator, err := criteria.NewEvaluator(context.Background(), evalCtx, pae.log) + evaluator, err := criteria.NewEvaluator(context.Background(), evalCtx, phase.log) assert.NoError(t, err) - result, err := pae.buildPayload(context.Background(), tt.build, evaluator, tt.params) + result, err := phase.buildPayload(context.Background(), tt.build, evaluator, tt.params) if tt.expectError { assert.Error(t, err) @@ -112,7 +112,7 @@ func TestBuildPayload(t *testing.T) { } func TestBuildMapPayload(t *testing.T) { - pae := testPAE() + phase := testPostActionsPhase() tests := []struct { name string @@ -175,9 +175,9 @@ func TestBuildMapPayload(t *testing.T) { for k, v := range tt.params { evalCtx.Set(k, v) } - evaluator, err := criteria.NewEvaluator(context.Background(), evalCtx, pae.log) + evaluator, err := criteria.NewEvaluator(context.Background(), evalCtx, phase.log) require.NoError(t, err) - result, err := pae.buildMapPayload(context.Background(), tt.input, evaluator, tt.params) + result, err := phase.buildMapPayload(context.Background(), tt.input, evaluator, tt.params) if tt.expectError { assert.Error(t, err) @@ -191,7 +191,7 @@ func TestBuildMapPayload(t *testing.T) { } func TestProcessValue(t *testing.T) { - pae := testPAE() + phase := testPostActionsPhase() tests := []struct { name string @@ -285,9 +285,9 @@ func TestProcessValue(t *testing.T) { for k, v := range tt.evalCtxData { evalCtx.Set(k, v) } - evaluator, err := criteria.NewEvaluator(context.Background(), evalCtx, pae.log) + evaluator, err := criteria.NewEvaluator(context.Background(), evalCtx, phase.log) require.NoError(t, err) - result, err := pae.processValue(context.Background(), tt.value, evaluator, tt.params) + result, err := phase.processValue(context.Background(), tt.value, evaluator, tt.params) if tt.expectError { assert.Error(t, err) @@ -300,7 +300,7 @@ func TestProcessValue(t *testing.T) { } } -func TestPostActionExecutor_ExecuteAll(t *testing.T) { +func TestPostActionsPhase_ExecuteAll(t *testing.T) { 
tests := []struct { name string postConfig *config_loader.PostConfig @@ -376,16 +376,15 @@ func TestPostActionExecutor_ExecuteAll(t *testing.T) { mockClient.DoResponse = tt.mockResponse } - pae := newPostActionExecutor(&ExecutorConfig{ - APIClient: mockClient, - Logger: logger.NewTestLogger(), - }) + phase := NewPostActionsPhase( + mockClient, + &config_loader.Config{}, + logger.NewTestLogger(), + ) - evt := event.New() - evt.SetID("test-event") execCtx := NewExecutionContext(context.Background(), map[string]interface{}{}, nil) - results, err := pae.ExecuteAll( + results, err := phase.executeAll( context.Background(), tt.postConfig, execCtx, diff --git a/internal/executor/resource_executor.go b/internal/executor/resource_executor.go deleted file mode 100644 index 4e398ca..0000000 --- a/internal/executor/resource_executor.go +++ /dev/null @@ -1,537 +0,0 @@ -package executor - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/mitchellh/copystructure" - "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" - "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/generation" - "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" - "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/constants" - "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// ResourceExecutor creates and updates Kubernetes resources -type ResourceExecutor struct { - k8sClient k8s_client.K8sClient - log logger.Logger -} - -// newResourceExecutor creates a new resource executor -// NOTE: Caller (NewExecutor) is responsible for config validation -func newResourceExecutor(config *ExecutorConfig) *ResourceExecutor { - return &ResourceExecutor{ - k8sClient: config.K8sClient, - log: config.Logger, - } -} - -// ExecuteAll creates/updates all resources in sequence -// Returns results for each resource and updates the execution context -func (re *ResourceExecutor) ExecuteAll(ctx context.Context, resources []config_loader.Resource, execCtx *ExecutionContext) ([]ResourceResult, error) { - if execCtx.Resources == nil { - execCtx.Resources = make(map[string]*unstructured.Unstructured) - } - results := make([]ResourceResult, 0, len(resources)) - - for _, resource := range resources { - result, err := re.executeResource(ctx, resource, execCtx) - results = append(results, result) - - if err != nil { - return results, err - } - } - - return results, nil -} - -// executeResource creates or updates a single Kubernetes resource -func (re *ResourceExecutor) executeResource(ctx context.Context, resource config_loader.Resource, execCtx *ExecutionContext) (ResourceResult, error) { - result := ResourceResult{ - Name: resource.Name, - Status: StatusSuccess, - } - - // Step 1: Build the manifest - re.log.Debugf(ctx, "Building manifest from config") - manifest, err := re.buildManifest(ctx, resource, execCtx) - if err != nil { - result.Status = StatusFailed - result.Error = err - return result, NewExecutorError(PhaseResources, resource.Name, "failed to build manifest", err) - } - - // Extract resource info - gvk := manifest.GroupVersionKind() - result.Kind = gvk.Kind - result.Namespace = manifest.GetNamespace() - result.ResourceName = manifest.GetName() - - // Add K8s resource context fields for logging (separate from event resource_type/resource_id) - ctx = logger.WithK8sKind(ctx, result.Kind) - ctx = 
logger.WithK8sName(ctx, result.ResourceName) - ctx = logger.WithK8sNamespace(ctx, result.Namespace) - - re.log.Debugf(ctx, "Resource[%s] manifest built: namespace=%s", resource.Name, manifest.GetNamespace()) - - // Step 2: Delegate to applyResource which handles discovery, generation comparison, and operations - return re.applyResource(ctx, resource, manifest, execCtx) -} - -// applyResource handles resource discovery, generation comparison, and execution of operations. -// It discovers existing resources (via Discovery config or by name), compares generations, -// and performs the appropriate operation (create, update, recreate, or skip). -func (re *ResourceExecutor) applyResource(ctx context.Context, resource config_loader.Resource, manifest *unstructured.Unstructured, execCtx *ExecutionContext) (ResourceResult, error) { - result := ResourceResult{ - Name: resource.Name, - Kind: manifest.GetKind(), - Namespace: manifest.GetNamespace(), - ResourceName: manifest.GetName(), - Status: StatusSuccess, - } - - if re.k8sClient == nil { - result.Status = StatusFailed - result.Error = fmt.Errorf("kubernetes client not configured") - return result, NewExecutorError(PhaseResources, resource.Name, "kubernetes client not configured", result.Error) - } - - gvk := manifest.GroupVersionKind() - - // Discover existing resource - var existingResource *unstructured.Unstructured - var err error - if resource.Discovery != nil { - // Use Discovery config to find existing resource (e.g., by label selector) - re.log.Debugf(ctx, "Discovering existing resource using discovery config...") - existingResource, err = re.discoverExistingResource(ctx, gvk, resource.Discovery, execCtx) - } else { - // No Discovery config - lookup by name from manifest - re.log.Debugf(ctx, "Looking up existing resource by name...") - existingResource, err = re.k8sClient.GetResource(ctx, gvk, manifest.GetNamespace(), manifest.GetName()) - } - - // Fail fast on any error except NotFound (which means resource doesn't exist yet) - if err != nil && !apierrors.IsNotFound(err) { - result.Status = StatusFailed - result.Error = err - return result, NewExecutorError(PhaseResources, resource.Name, "failed to find existing resource", err) - } - - if existingResource != nil { - re.log.Debugf(ctx, "Existing resource found: %s/%s", existingResource.GetNamespace(), existingResource.GetName()) - } else { - re.log.Debugf(ctx, "No existing resource found, will create") - } - - // Extract manifest generation once for use in comparison and logging - manifestGen := generation.GetGenerationFromUnstructured(manifest) - - // Add observed_generation to context early so it appears in all subsequent logs - ctx = logger.WithObservedGeneration(ctx, manifestGen) - - // Get existing generation (0 if not found) - var existingGen int64 - if existingResource != nil { - existingGen = generation.GetGenerationFromUnstructured(existingResource) - } - - // Compare generations to determine operation - decision := generation.CompareGenerations(manifestGen, existingGen, existingResource != nil) - - // Handle recreateOnChange override - result.Operation = decision.Operation - result.OperationReason = decision.Reason - if decision.Operation == generation.OperationUpdate && resource.RecreateOnChange { - result.Operation = generation.OperationRecreate - result.OperationReason = fmt.Sprintf("%s, recreateOnChange=true", decision.Reason) - } - - // Log the operation decision - re.log.Infof(ctx, "Resource[%s] is processing: operation=%s reason=%s", - resource.Name, 
strings.ToUpper(string(result.Operation)), result.OperationReason) - - // Execute the operation - switch result.Operation { - case generation.OperationCreate: - result.Resource, err = re.createResource(ctx, manifest) - case generation.OperationUpdate: - result.Resource, err = re.updateResource(ctx, existingResource, manifest) - case generation.OperationRecreate: - result.Resource, err = re.recreateResource(ctx, existingResource, manifest) - case generation.OperationSkip: - result.Resource = existingResource - } - - if err != nil { - result.Status = StatusFailed - result.Error = err - // Set ExecutionError for K8s operation failure - execCtx.Adapter.ExecutionError = &ExecutionError{ - Phase: string(PhaseResources), - Step: resource.Name, - Message: err.Error(), - } - errCtx := logger.WithK8sResult(ctx, "FAILED") - errCtx = logger.WithErrorField(errCtx, err) - re.log.Errorf(errCtx, "Resource[%s] processed: operation=%s reason=%s", - resource.Name, result.Operation, result.OperationReason) - return result, NewExecutorError(PhaseResources, resource.Name, - fmt.Sprintf("failed to %s resource", result.Operation), err) - } - successCtx := logger.WithK8sResult(ctx, "SUCCESS") - re.log.Infof(successCtx, "Resource[%s] processed: operation=%s reason=%s", - resource.Name, result.Operation, result.OperationReason) - - // Store resource in execution context - if result.Resource != nil { - execCtx.Resources[resource.Name] = result.Resource - re.log.Debugf(ctx, "Resource stored in context as '%s'", resource.Name) - } - - return result, nil -} - -// buildManifest builds an unstructured manifest from the resource configuration -func (re *ResourceExecutor) buildManifest(ctx context.Context, resource config_loader.Resource, execCtx *ExecutionContext) (*unstructured.Unstructured, error) { - var manifestData map[string]interface{} - - // Get manifest (inline or loaded from ref) - if resource.Manifest != nil { - switch m := resource.Manifest.(type) { - case map[string]interface{}: - manifestData = m - case map[interface{}]interface{}: - manifestData = convertToStringKeyMap(m) - default: - return nil, fmt.Errorf("unsupported manifest type: %T", resource.Manifest) - } - } else { - return nil, fmt.Errorf("no manifest specified for resource %s", resource.Name) - } - - // Deep copy to avoid modifying the original - manifestData = deepCopyMap(ctx, manifestData, re.log) - - // Render all template strings in the manifest - renderedData, err := renderManifestTemplates(manifestData, execCtx.Params) - if err != nil { - return nil, fmt.Errorf("failed to render manifest templates: %w", err) - } - - // Convert to unstructured - obj := &unstructured.Unstructured{Object: renderedData} - - // Validate manifest - if err := validateManifest(obj); err != nil { - return nil, err - } - - return obj, nil -} - -// validateManifest validates a Kubernetes manifest has all required fields and annotations -func validateManifest(obj *unstructured.Unstructured) error { - // Validate required Kubernetes fields - if obj.GetAPIVersion() == "" { - return fmt.Errorf("manifest missing apiVersion") - } - if obj.GetKind() == "" { - return fmt.Errorf("manifest missing kind") - } - if obj.GetName() == "" { - return fmt.Errorf("manifest missing metadata.name") - } - - // Validate required generation annotation - if generation.GetGenerationFromUnstructured(obj) == 0 { - return fmt.Errorf("manifest missing required annotation %q", constants.AnnotationGeneration) - } - - return nil -} - -// discoverExistingResource discovers an existing resource using the 
discovery config -func (re *ResourceExecutor) discoverExistingResource(ctx context.Context, gvk schema.GroupVersionKind, discovery *config_loader.DiscoveryConfig, execCtx *ExecutionContext) (*unstructured.Unstructured, error) { - if re.k8sClient == nil { - return nil, fmt.Errorf("kubernetes client not configured") - } - - // Render discovery namespace template - // Empty namespace means all namespaces (normalized from "*" at config load time) - namespace, err := renderTemplate(discovery.Namespace, execCtx.Params) - if err != nil { - return nil, fmt.Errorf("failed to render namespace template: %w", err) - } - - // Check if discovering by name - if discovery.ByName != "" { - name, err := renderTemplate(discovery.ByName, execCtx.Params) - if err != nil { - return nil, fmt.Errorf("failed to render byName template: %w", err) - } - return re.k8sClient.GetResource(ctx, gvk, namespace, name) - } - - // Discover by label selector - if discovery.BySelectors != nil && len(discovery.BySelectors.LabelSelector) > 0 { - // Render label selector templates - renderedLabels := make(map[string]string) - for k, v := range discovery.BySelectors.LabelSelector { - renderedK, err := renderTemplate(k, execCtx.Params) - if err != nil { - return nil, fmt.Errorf("failed to render label key template: %w", err) - } - renderedV, err := renderTemplate(v, execCtx.Params) - if err != nil { - return nil, fmt.Errorf("failed to render label value template: %w", err) - } - renderedLabels[renderedK] = renderedV - } - - labelSelector := k8s_client.BuildLabelSelector(renderedLabels) - - discoveryConfig := &k8s_client.DiscoveryConfig{ - Namespace: namespace, - LabelSelector: labelSelector, - } - - list, err := re.k8sClient.DiscoverResources(ctx, gvk, discoveryConfig) - if err != nil { - return nil, err - } - - if len(list.Items) == 0 { - return nil, apierrors.NewNotFound(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, "") - } - - return generation.GetLatestGenerationFromList(list), nil - } - - return nil, fmt.Errorf("discovery config must specify byName or bySelectors") -} - -// createResource creates a new Kubernetes resource -func (re *ResourceExecutor) createResource(ctx context.Context, manifest *unstructured.Unstructured) (*unstructured.Unstructured, error) { - if re.k8sClient == nil { - return nil, fmt.Errorf("kubernetes client not configured") - } - - return re.k8sClient.CreateResource(ctx, manifest) -} - -// updateResource updates an existing Kubernetes resource -func (re *ResourceExecutor) updateResource(ctx context.Context, existing, manifest *unstructured.Unstructured) (*unstructured.Unstructured, error) { - if re.k8sClient == nil { - return nil, fmt.Errorf("kubernetes client not configured") - } - - // Preserve resourceVersion from existing for update - manifest.SetResourceVersion(existing.GetResourceVersion()) - manifest.SetUID(existing.GetUID()) - - return re.k8sClient.UpdateResource(ctx, manifest) -} - -// recreateResource deletes and recreates a Kubernetes resource -// It waits for the resource to be fully deleted before creating the new one -// to avoid race conditions with Kubernetes asynchronous deletion -func (re *ResourceExecutor) recreateResource(ctx context.Context, existing, manifest *unstructured.Unstructured) (*unstructured.Unstructured, error) { - if re.k8sClient == nil { - return nil, fmt.Errorf("kubernetes client not configured") - } - - gvk := existing.GroupVersionKind() - namespace := existing.GetNamespace() - name := existing.GetName() - - // Delete the existing resource - 
re.log.Debugf(ctx, "Deleting resource for recreation") - if err := re.k8sClient.DeleteResource(ctx, gvk, namespace, name); err != nil { - return nil, fmt.Errorf("failed to delete resource for recreation: %w", err) - } - - // Wait for the resource to be fully deleted - re.log.Debugf(ctx, "Waiting for resource deletion to complete") - if err := re.waitForDeletion(ctx, gvk, namespace, name); err != nil { - return nil, fmt.Errorf("failed waiting for resource deletion: %w", err) - } - - // Create the new resource - re.log.Debugf(ctx, "Creating new resource after deletion confirmed") - return re.k8sClient.CreateResource(ctx, manifest) -} - -// waitForDeletion polls until the resource is confirmed deleted or context times out -// Returns nil when the resource is confirmed gone (NotFound), or an error otherwise -func (re *ResourceExecutor) waitForDeletion(ctx context.Context, gvk schema.GroupVersionKind, namespace, name string) error { - const pollInterval = 100 * time.Millisecond - - ticker := time.NewTicker(pollInterval) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - re.log.Warnf(ctx, "Context cancelled/timed out while waiting for deletion") - return fmt.Errorf("context cancelled while waiting for resource deletion: %w", ctx.Err()) - case <-ticker.C: - _, err := re.k8sClient.GetResource(ctx, gvk, namespace, name) - if err != nil { - // NotFound means the resource is deleted - this is success - if apierrors.IsNotFound(err) { - re.log.Debugf(ctx, "Resource deletion confirmed") - return nil - } - // Any other error is unexpected - errCtx := logger.WithErrorField(ctx, err) - re.log.Errorf(errCtx, "Error checking resource deletion status") - return fmt.Errorf("error checking deletion status: %w", err) - } - // Resource still exists, continue polling - re.log.Debugf(ctx, "Resource still exists, waiting for deletion...") - } - } -} - -// convertToStringKeyMap converts map[interface{}]interface{} to map[string]interface{} -func convertToStringKeyMap(m map[interface{}]interface{}) map[string]interface{} { - result := make(map[string]interface{}) - for k, v := range m { - strKey := fmt.Sprintf("%v", k) - switch val := v.(type) { - case map[interface{}]interface{}: - result[strKey] = convertToStringKeyMap(val) - case []interface{}: - result[strKey] = convertSlice(val) - default: - result[strKey] = v - } - } - return result -} - -// convertSlice converts slice elements recursively -func convertSlice(s []interface{}) []interface{} { - result := make([]interface{}, len(s)) - for i, v := range s { - switch val := v.(type) { - case map[interface{}]interface{}: - result[i] = convertToStringKeyMap(val) - case []interface{}: - result[i] = convertSlice(val) - default: - result[i] = v - } - } - return result -} - -// deepCopyMap creates a deep copy of a map using github.com/mitchellh/copystructure. -// This handles non-JSON-serializable types (channels, functions, time.Time, etc.) -// and preserves type information (e.g., int64 stays int64, not float64). -// If deep copy fails, it falls back to a shallow copy and logs a warning. -// WARNING: Shallow copy means nested maps/slices will share references with the original, -// which could lead to unexpected mutations. -func deepCopyMap(ctx context.Context, m map[string]interface{}, log logger.Logger) map[string]interface{} { - if m == nil { - return nil - } - - copied, err := copystructure.Copy(m) - if err != nil { - // Fallback to shallow copy if deep copy fails - log.Warnf(ctx, "Failed to deep copy map: %v. 
Falling back to shallow copy.", err) - result := make(map[string]interface{}) - for k, v := range m { - result[k] = v - } - return result - } - - result, ok := copied.(map[string]interface{}) - if !ok { - // Should not happen, but handle gracefully - result := make(map[string]interface{}) - for k, v := range m { - result[k] = v - } - return result - } - - return result -} - -// renderManifestTemplates recursively renders all template strings in a manifest -func renderManifestTemplates(data map[string]interface{}, params map[string]interface{}) (map[string]interface{}, error) { - result := make(map[string]interface{}) - - for k, v := range data { - renderedKey, err := renderTemplate(k, params) - if err != nil { - return nil, fmt.Errorf("failed to render key '%s': %w", k, err) - } - - renderedValue, err := renderValue(v, params) - if err != nil { - return nil, fmt.Errorf("failed to render value for key '%s': %w", k, err) - } - - result[renderedKey] = renderedValue - } - - return result, nil -} - -// renderValue renders a value recursively -func renderValue(v interface{}, params map[string]interface{}) (interface{}, error) { - switch val := v.(type) { - case string: - return renderTemplate(val, params) - case map[string]interface{}: - return renderManifestTemplates(val, params) - case map[interface{}]interface{}: - converted := convertToStringKeyMap(val) - return renderManifestTemplates(converted, params) - case []interface{}: - result := make([]interface{}, len(val)) - for i, item := range val { - rendered, err := renderValue(item, params) - if err != nil { - return nil, err - } - result[i] = rendered - } - return result, nil - default: - return v, nil - } -} - -// GetResourceAsMap converts an unstructured resource to a map for CEL evaluation -func GetResourceAsMap(resource *unstructured.Unstructured) map[string]interface{} { - if resource == nil { - return nil - } - return resource.Object -} - -// BuildResourcesMap builds a map of all resources for CEL evaluation. -// Resource names are used directly as keys (snake_case and camelCase both work in CEL). -// Name validation (no hyphens, no duplicates) is done at config load time. 
-func BuildResourcesMap(resources map[string]*unstructured.Unstructured) map[string]interface{} { - result := make(map[string]interface{}) - for name, resource := range resources { - if resource != nil { - result[name] = resource.Object - } - } - return result -} diff --git a/internal/executor/resource_executor_test.go b/internal/executor/resource_executor_test.go index 47d2d99..a116e9a 100644 --- a/internal/executor/resource_executor_test.go +++ b/internal/executor/resource_executor_test.go @@ -1,11 +1,11 @@ package executor import ( - "context" "testing" - "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/utils" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestDeepCopyMap_BasicTypes(t *testing.T) { @@ -17,7 +17,8 @@ func TestDeepCopyMap_BasicTypes(t *testing.T) { "null": nil, } - copied := deepCopyMap(context.Background(), original, logger.NewTestLogger()) + copied, err := utils.DeepCopyMap(original) + require.NoError(t, err) // Verify values are copied correctly assert.Equal(t, "hello", copied["string"]) @@ -26,15 +27,12 @@ func TestDeepCopyMap_BasicTypes(t *testing.T) { assert.Equal(t, true, copied["bool"]) assert.Nil(t, copied["null"]) - // Verify no warnings logged - // Verify mutation doesn't affect original copied["string"] = "modified" assert.Equal(t, "hello", original["string"], "Original should not be modified") } func TestDeepCopyMap_NestedMaps(t *testing.T) { - original := map[string]interface{}{ "level1": map[string]interface{}{ "level2": map[string]interface{}{ @@ -43,9 +41,8 @@ func TestDeepCopyMap_NestedMaps(t *testing.T) { }, } - copied := deepCopyMap(context.Background(), original, logger.NewTestLogger()) - - // Verify deep copy works + copied, err := utils.DeepCopyMap(original) + require.NoError(t, err) // Modify the copied nested map level1 := copied["level1"].(map[string]interface{}) @@ -59,7 +56,6 @@ func TestDeepCopyMap_NestedMaps(t *testing.T) { } func TestDeepCopyMap_Slices(t *testing.T) { - original := map[string]interface{}{ "items": []interface{}{"a", "b", "c"}, "nested": []interface{}{ @@ -67,7 +63,8 @@ func TestDeepCopyMap_Slices(t *testing.T) { }, } - copied := deepCopyMap(context.Background(), original, logger.NewTestLogger()) + copied, err := utils.DeepCopyMap(original) + require.NoError(t, err) // Modify copied slice copiedItems := copied["items"].([]interface{}) @@ -80,16 +77,14 @@ func TestDeepCopyMap_Slices(t *testing.T) { func TestDeepCopyMap_Channel(t *testing.T) { // copystructure handles channels properly (creates new channel) - ch := make(chan int, 5) original := map[string]interface{}{ "channel": ch, "normal": "value", } - copied := deepCopyMap(context.Background(), original, logger.NewTestLogger()) - - // copystructure handles channels - no warning expected + copied, err := utils.DeepCopyMap(original) + require.NoError(t, err) // Normal values are copied assert.Equal(t, "value", copied["normal"]) @@ -102,16 +97,14 @@ func TestDeepCopyMap_Channel(t *testing.T) { func TestDeepCopyMap_Function(t *testing.T) { // copystructure handles functions (copies the function pointer) - fn := func() string { return "hello" } original := map[string]interface{}{ "func": fn, "normal": "value", } - copied := deepCopyMap(context.Background(), original, logger.NewTestLogger()) - - // copystructure handles functions - no warning expected + copied, err := utils.DeepCopyMap(original) + require.NoError(t, err) // Normal values are copied assert.Equal(t, 
"value", copied["normal"]) @@ -123,7 +116,6 @@ func TestDeepCopyMap_Function(t *testing.T) { func TestDeepCopyMap_NestedWithChannel(t *testing.T) { // Test that nested maps are deep copied even when channels are present - ch := make(chan int) nested := map[string]interface{}{"mutable": "original"} original := map[string]interface{}{ @@ -131,9 +123,8 @@ func TestDeepCopyMap_NestedWithChannel(t *testing.T) { "nested": nested, } - copied := deepCopyMap(context.Background(), original, logger.NewTestLogger()) - - // copystructure handles this properly - no warning expected + copied, err := utils.DeepCopyMap(original) + require.NoError(t, err) // Modify the copied nested map copiedNested := copied["nested"].(map[string]interface{}) @@ -145,9 +136,9 @@ func TestDeepCopyMap_NestedWithChannel(t *testing.T) { } func TestDeepCopyMap_EmptyMap(t *testing.T) { - original := map[string]interface{}{} - copied := deepCopyMap(context.Background(), original, logger.NewTestLogger()) + copied, err := utils.DeepCopyMap(original) + require.NoError(t, err) assert.NotNil(t, copied) assert.Empty(t, copied) @@ -162,8 +153,8 @@ func TestDeepCopyMap_DeepCopyVerification(t *testing.T) { }, } - // Should not panic - copied := deepCopyMap(context.Background(), original, logger.NewTestLogger()) + copied, err := utils.DeepCopyMap(original) + require.NoError(t, err) assert.Equal(t, "value", copied["string"]) @@ -176,15 +167,13 @@ func TestDeepCopyMap_DeepCopyVerification(t *testing.T) { } func TestDeepCopyMap_NilMap(t *testing.T) { - - copied := deepCopyMap(context.Background(), nil, logger.NewTestLogger()) - + copied, err := utils.DeepCopyMap(nil) + require.NoError(t, err) assert.Nil(t, copied) } func TestDeepCopyMap_KubernetesManifest(t *testing.T) { // Test with a realistic Kubernetes manifest structure - original := map[string]interface{}{ "apiVersion": "v1", "kind": "ConfigMap", @@ -201,7 +190,8 @@ func TestDeepCopyMap_KubernetesManifest(t *testing.T) { }, } - copied := deepCopyMap(context.Background(), original, logger.NewTestLogger()) + copied, err := utils.DeepCopyMap(original) + require.NoError(t, err) // Modify copied manifest copiedMetadata := copied["metadata"].(map[string]interface{}) @@ -214,10 +204,10 @@ func TestDeepCopyMap_KubernetesManifest(t *testing.T) { assert.Equal(t, "test", originalLabels["app"], "Original manifest should not be modified") } -// TestDeepCopyMap_Context ensures the function is used correctly in context +// TestDeepCopyMap_RealWorldContext ensures the function is used correctly in context func TestDeepCopyMap_RealWorldContext(t *testing.T) { // This simulates how deepCopyMap is used in executeResource - manifest := map[string]interface{}{ + manifestData := map[string]interface{}{ "apiVersion": "v1", "kind": "Namespace", "metadata": map[string]interface{}{ @@ -226,13 +216,40 @@ func TestDeepCopyMap_RealWorldContext(t *testing.T) { } // Deep copy before template rendering - copied := deepCopyMap(context.Background(), manifest, logger.NewTestLogger()) + copied, err := utils.DeepCopyMap(manifestData) + require.NoError(t, err) // Simulate template rendering modifying the copy copiedMetadata := copied["metadata"].(map[string]interface{}) copiedMetadata["name"] = "rendered-namespace" // Original template should remain unchanged for next iteration - originalMetadata := manifest["metadata"].(map[string]interface{}) + originalMetadata := manifestData["metadata"].(map[string]interface{}) assert.Equal(t, "{{ .namespace }}", originalMetadata["name"]) } + +// TestDeepCopyMapWithFallback tests the 
fallback version +func TestDeepCopyMapWithFallback(t *testing.T) { + original := map[string]interface{}{ + "key": "value", + "nested": map[string]interface{}{ + "inner": "deep", + }, + } + + copied := utils.DeepCopyMapWithFallback(original) + assert.NotNil(t, copied) + assert.Equal(t, "value", copied["key"]) + + // Verify it's a deep copy + copiedNested := copied["nested"].(map[string]interface{}) + copiedNested["inner"] = "modified" + + originalNested := original["nested"].(map[string]interface{}) + assert.Equal(t, "deep", originalNested["inner"], "Original should not be modified") +} + +func TestDeepCopyMapWithFallback_NilMap(t *testing.T) { + copied := utils.DeepCopyMapWithFallback(nil) + assert.Nil(t, copied) +} diff --git a/internal/executor/types.go b/internal/executor/types.go index 72f1f94..8d53534 100644 --- a/internal/executor/types.go +++ b/internal/executor/types.go @@ -7,9 +7,9 @@ import ( "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/criteria" - "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/generation" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/manifest" "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) @@ -68,11 +68,15 @@ type ExecutorConfig struct { // Executor processes CloudEvents according to the adapter configuration type Executor struct { - config *ExecutorConfig - precondExecutor *PreconditionExecutor - resourceExecutor *ResourceExecutor - postActionExecutor *PostActionExecutor - log logger.Logger + config *ExecutorConfig + log logger.Logger + // pipeline holds the phase-based execution pipeline + pipeline *Pipeline + // phases holds references to individual phases for result retrieval + paramExtractionPhase *ParamExtractionPhase + preconditionsPhase *PreconditionsPhase + resourcesPhase *ResourcesPhase + postActionsPhase *PostActionsPhase } // ExecutionResult contains the result of processing an event @@ -134,7 +138,7 @@ type ResourceResult struct { // Status is the result status Status ExecutionStatus // Operation is the operation performed (create, update, recreate, skip) - Operation generation.Operation + Operation manifest.Operation // Resource is the created/updated resource (if successful) Resource *unstructured.Unstructured // OperationReason explains why this operation was performed diff --git a/internal/executor/utils.go b/internal/executor/utils.go index 05ce96f..f2b2a89 100644 --- a/internal/executor/utils.go +++ b/internal/executor/utils.go @@ -1,13 +1,10 @@ package executor import ( - "bytes" "context" "fmt" "net/http" - "strconv" "strings" - "text/template" "time" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" @@ -15,8 +12,7 @@ import ( "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" apierrors "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/errors" "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" - "golang.org/x/text/cases" - "golang.org/x/text/language" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/utils" ) // ToConditionDefs converts config_loader.Condition slice to criteria.ConditionDef slice. 
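The types.go hunk above replaces the per-executor fields with a Pipeline and four named phases. The Pipeline and phase interface definitions are not part of this excerpt; mirroring the Name/ShouldSkip/Execute methods that ResourcesPhase implements in this patch, the contract can be sketched roughly as follows (type names and the skip/fail-fast loop are assumptions, not the actual implementation):

```go
// Sketch only: a plausible phase contract and pipeline loop, mirroring the
// methods ResourcesPhase implements in this patch (Name, ShouldSkip, Execute).
// The real Pipeline/Phase types in internal/executor may differ in detail.
package executorsketch

import (
	"context"
	"fmt"
	"log"
)

type ExecutionPhase string

// ExecutionContext is a stand-in for the executor's real context type.
type ExecutionContext struct {
	Params map[string]interface{}
}

// Phase is what each pipeline step is assumed to look like.
type Phase interface {
	Name() ExecutionPhase
	ShouldSkip(execCtx *ExecutionContext) (bool, string)
	Execute(ctx context.Context, execCtx *ExecutionContext) error
}

// Pipeline runs phases in order, skipping or failing fast as each phase decides.
type Pipeline struct {
	phases []Phase
}

func NewPipeline(phases ...Phase) *Pipeline {
	return &Pipeline{phases: phases}
}

func (p *Pipeline) Run(ctx context.Context, execCtx *ExecutionContext) error {
	for _, phase := range p.phases {
		if skip, reason := phase.ShouldSkip(execCtx); skip {
			log.Printf("Phase %s: SKIPPED (%s)", phase.Name(), reason)
			continue
		}
		if err := phase.Execute(ctx, execCtx); err != nil {
			return fmt.Errorf("phase %s failed: %w", phase.Name(), err)
		}
	}
	return nil
}
```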
@@ -282,117 +278,16 @@ func ValidateAPIResponse(resp *hyperfleet_api.Response, err error, method, url s return nil } -// renderTemplate renders a Go template string with the given data -// templateFuncs provides common functions for Go templates -var templateFuncs = template.FuncMap{ - // Time functions - "now": time.Now, - "date": func(layout string, t time.Time) string { - return t.Format(layout) - }, - "dateFormat": func(layout string, t time.Time) string { - return t.Format(layout) - }, - // String functions - "lower": strings.ToLower, - "upper": strings.ToUpper, - "title": func(s string) string { - return cases.Title(language.English).String(s) - }, - "trim": strings.TrimSpace, - "replace": strings.ReplaceAll, - "contains": strings.Contains, - "hasPrefix": strings.HasPrefix, - "hasSuffix": strings.HasSuffix, - // Default value function - "default": func(defaultVal, val interface{}) interface{} { - if val == nil || val == "" { - return defaultVal - } - return val - }, - // Quote function - "quote": func(s string) string { - return fmt.Sprintf("%q", s) - }, - // Type conversion functions - "int": func(v interface{}) int { - switch val := v.(type) { - case int: - return val - case int64: - return int(val) - case float64: - return int(val) - case string: - i, _ := strconv.Atoi(val) //nolint:errcheck // returns 0 on error, which is acceptable - return i - default: - return 0 - } - }, - "int64": func(v interface{}) int64 { - switch val := v.(type) { - case int: - return int64(val) - case int64: - return val - case float64: - return int64(val) - case string: - i, _ := strconv.ParseInt(val, 10, 64) //nolint:errcheck // returns 0 on error, which is acceptable - return i - default: - return 0 - } - }, - "float64": func(v interface{}) float64 { - switch val := v.(type) { - case int: - return float64(val) - case int64: - return float64(val) - case float64: - return val - case string: - f, _ := strconv.ParseFloat(val, 64) //nolint:errcheck // returns 0 on error, which is acceptable - return f - default: - return 0 - } - }, - "string": func(v interface{}) string { - return fmt.Sprintf("%v", v) - }, -} - -// This is a shared utility used across preconditions, resources, and post-actions +// renderTemplate renders a Go template string with the given data. +// This delegates to utils.RenderTemplate which provides the canonical template functions. func renderTemplate(templateStr string, data map[string]interface{}) (string, error) { - // If no template delimiters, return as-is - if !strings.Contains(templateStr, "{{") { - return templateStr, nil - } - - tmpl, err := template.New("template").Funcs(templateFuncs).Option("missingkey=error").Parse(templateStr) - if err != nil { - return "", fmt.Errorf("failed to parse template: %w", err) - } - - var buf bytes.Buffer - if err := tmpl.Execute(&buf, data); err != nil { - return "", fmt.Errorf("failed to execute template: %w", err) - } - - return buf.String(), nil + return utils.RenderTemplate(templateStr, data) } -// renderTemplateBytes renders a Go template string and returns bytes +// renderTemplateBytes renders a Go template string and returns bytes. +// This delegates to utils.RenderTemplateBytes. 
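The removed block above shows what renderTemplate used to do inline: a Go text/template with a small FuncMap (lower, upper, default, quote, type conversions, and so on) and missingkey=error. Assuming utils.RenderTemplate keeps that behaviour, a stripped-down, self-contained illustration looks like this (only a few of the helpers are reproduced; this is not the pkg/utils code):

```go
// Sketch only: a minimal re-implementation of the rendering behaviour that the
// removed block shows (Go text/template, a small FuncMap, missingkey=error).
// The real helper now lives in pkg/utils as RenderTemplate; this is not that code.
package main

import (
	"bytes"
	"fmt"
	"strings"
	"text/template"
)

func renderSketch(tmplStr string, data map[string]interface{}) (string, error) {
	// Fast path mirrored from the removed code: no delimiters, nothing to render.
	if !strings.Contains(tmplStr, "{{") {
		return tmplStr, nil
	}
	funcs := template.FuncMap{
		"lower": strings.ToLower,
		"upper": strings.ToUpper,
		"default": func(def, val interface{}) interface{} {
			if val == nil || val == "" {
				return def
			}
			return val
		},
	}
	tmpl, err := template.New("t").Funcs(funcs).Option("missingkey=error").Parse(tmplStr)
	if err != nil {
		return "", fmt.Errorf("parse: %w", err)
	}
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, data); err != nil {
		return "", fmt.Errorf("execute: %w", err)
	}
	return buf.String(), nil
}

func main() {
	out, err := renderSketch(`name-{{ lower .ClusterID }}`, map[string]interface{}{"ClusterID": "ABC123"})
	fmt.Println(out, err) // name-abc123 <nil>
}
```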
func renderTemplateBytes(templateStr string, data map[string]interface{}) ([]byte, error) { - result, err := renderTemplate(templateStr, data) - if err != nil { - return nil, err - } - return []byte(result), nil + return utils.RenderTemplateBytes(templateStr, data) } // executionErrorToMap converts an ExecutionError struct to a map for CEL evaluation diff --git a/internal/executor/utils_test.go b/internal/executor/utils_test.go index af2ec75..30c58a4 100644 --- a/internal/executor/utils_test.go +++ b/internal/executor/utils_test.go @@ -12,6 +12,7 @@ import ( "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" apierrors "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/errors" "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/utils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -783,7 +784,7 @@ func TestExecuteLogAction(t *testing.T) { } } -// TestConvertToStringKeyMap tests map key conversion +// TestConvertToStringKeyMap tests map key conversion (now in utils package) func TestConvertToStringKeyMap(t *testing.T) { tests := []struct { name string @@ -860,76 +861,7 @@ func TestConvertToStringKeyMap(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := convertToStringKeyMap(tt.input) - assert.Equal(t, tt.expected, result) - }) - } -} - -// TestConvertSlice tests slice element conversion -func TestConvertSlice(t *testing.T) { - tests := []struct { - name string - input []interface{} - expected []interface{} - }{ - { - name: "empty slice", - input: []interface{}{}, - expected: []interface{}{}, - }, - { - name: "simple values", - input: []interface{}{"a", "b", "c"}, - expected: []interface{}{"a", "b", "c"}, - }, - { - name: "numeric values", - input: []interface{}{1, 2, 3}, - expected: []interface{}{1, 2, 3}, - }, - { - name: "nested maps in slice", - input: []interface{}{ - map[interface{}]interface{}{"key": "value1"}, - map[interface{}]interface{}{"key": "value2"}, - }, - expected: []interface{}{ - map[string]interface{}{"key": "value1"}, - map[string]interface{}{"key": "value2"}, - }, - }, - { - name: "nested slices", - input: []interface{}{ - []interface{}{"a", "b"}, - []interface{}{"c", "d"}, - }, - expected: []interface{}{ - []interface{}{"a", "b"}, - []interface{}{"c", "d"}, - }, - }, - { - name: "mixed types", - input: []interface{}{ - "string", - 123, - map[interface{}]interface{}{"nested": "map"}, - []interface{}{"nested", "slice"}, - }, - expected: []interface{}{ - "string", - 123, - map[string]interface{}{"nested": "map"}, - []interface{}{"nested", "slice"}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := convertSlice(tt.input) + result := utils.ConvertToStringKeyMap(tt.input) assert.Equal(t, tt.expected, result) }) } diff --git a/internal/k8s_client/apply.go b/internal/k8s_client/apply.go new file mode 100644 index 0000000..c1ddeb2 --- /dev/null +++ b/internal/k8s_client/apply.go @@ -0,0 +1,261 @@ +package k8s_client + +import ( + "context" + "fmt" + "time" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/manifest" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/resource_applier" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Type aliases for backward compatibility. 
+// These types are now defined in the resource_applier package. +type ( + ApplyOptions = resource_applier.ApplyOptions + ApplyResult = resource_applier.ApplyResult + ResourceToApply = resource_applier.ResourceToApply + ResourceApplyResult = resource_applier.ResourceApplyResult + ApplyResourcesResult = resource_applier.ApplyResourcesResult +) + +// ApplyResource creates or updates a Kubernetes resource based on generation comparison. +// This is the generation-aware upsert operation that mirrors maestro_client.ApplyManifestWork. +// +// If the resource doesn't exist, it creates it. +// If it exists and the generation differs, it updates (or recreates if RecreateOnChange=true). +// If it exists and the generation matches, it skips the update (idempotent). +// +// The manifest must have the hyperfleet.io/generation annotation set. +// Use manifest.ValidateManifest() to validate before calling. +// +// Parameters: +// - ctx: Context for the operation +// - newManifest: The desired resource state (must have generation annotation) +// - existing: The current resource state (nil if not found/discovered) +// - opts: Apply options (can be nil for defaults) +// +// Returns: +// - ApplyResult containing the resource and operation details +// - Error if the operation fails +// +// Example: +// +// // Discover existing resource first +// existing, _ := client.DiscoverResources(ctx, gvk, discovery) +// var existingResource *unstructured.Unstructured +// if len(existing.Items) > 0 { +// existingResource = manifest.GetLatestGenerationFromList(existing) +// } +// +// // Apply with generation tracking +// result, err := client.ApplyResource(ctx, newManifest, existingResource, &ApplyOptions{ +// RecreateOnChange: true, +// }) +func (c *Client) ApplyResource( + ctx context.Context, + newManifest *unstructured.Unstructured, + existing *unstructured.Unstructured, + opts *ApplyOptions, +) (*ApplyResult, error) { + if newManifest == nil { + return nil, fmt.Errorf("new manifest cannot be nil") + } + + if opts == nil { + opts = &ApplyOptions{} + } + + // Get generation from new manifest + newGen := manifest.GetGenerationFromUnstructured(newManifest) + + // Get existing generation (0 if not found) + var existingGen int64 + if existing != nil { + existingGen = manifest.GetGenerationFromUnstructured(existing) + } + + // Compare generations to determine operation + decision := manifest.CompareGenerations(newGen, existingGen, existing != nil) + + result := &ApplyResult{ + Operation: decision.Operation, + Reason: decision.Reason, + } + + // Handle recreateOnChange override + if decision.Operation == manifest.OperationUpdate && opts.RecreateOnChange { + result.Operation = manifest.OperationRecreate + result.Reason = fmt.Sprintf("%s, recreateOnChange=true", decision.Reason) + } + + gvk := newManifest.GroupVersionKind() + name := newManifest.GetName() + + c.log.Infof(ctx, "ApplyResource %s/%s: operation=%s reason=%s", + gvk.Kind, name, result.Operation, result.Reason) + + // Execute the operation + var err error + switch result.Operation { + case manifest.OperationCreate: + result.Resource, err = c.CreateResource(ctx, newManifest) + + case manifest.OperationUpdate: + // Preserve resourceVersion and UID from existing for update + newManifest.SetResourceVersion(existing.GetResourceVersion()) + newManifest.SetUID(existing.GetUID()) + result.Resource, err = c.UpdateResource(ctx, newManifest) + + case manifest.OperationRecreate: + result.Resource, err = c.recreateResource(ctx, existing, newManifest) + + case 
manifest.OperationSkip: + result.Resource = existing + } + + if err != nil { + return nil, fmt.Errorf("failed to %s resource %s/%s: %w", + result.Operation, gvk.Kind, name, err) + } + + return result, nil +} + +// ApplyResources applies multiple Kubernetes resources in sequence. +// This is a batch version of ApplyResource that processes resources one by one, +// stopping on first error (fail-fast behavior). +// +// Each resource in the batch can have its own ApplyOptions (e.g., RecreateOnChange). +// Resources are processed in order, and results are returned for all processed resources. +// +// Parameters: +// - ctx: Context for the operation +// - resources: List of resources to apply (must have generation annotations) +// +// Returns: +// - ApplyResourcesResult containing results for all processed resources +// - Error if any resource fails (results will contain partial results up to failure) +// +// Example: +// +// resources := []k8s_client.ResourceToApply{ +// {Name: "configmap", Manifest: cm, Existing: existingCM, Options: nil}, +// {Name: "deployment", Manifest: deploy, Existing: existingDeploy, Options: &k8s_client.ApplyOptions{RecreateOnChange: true}}, +// } +// result, err := client.ApplyResources(ctx, resources) +// if err != nil { +// // Check result.Results for partial results +// } +func (c *Client) ApplyResources( + ctx context.Context, + resources []ResourceToApply, +) (*ApplyResourcesResult, error) { + result := &ApplyResourcesResult{ + Results: make([]*ResourceApplyResult, 0, len(resources)), + } + + for _, r := range resources { + resourceResult := &ResourceApplyResult{ + Name: r.Name, + } + + // Validate manifest + if r.Manifest == nil { + resourceResult.Error = fmt.Errorf("manifest cannot be nil for resource %s", r.Name) + result.Results = append(result.Results, resourceResult) + result.FailedCount++ + return result, resourceResult.Error + } + + // Extract resource info for logging and result + resourceResult.Kind = r.Manifest.GetKind() + resourceResult.Namespace = r.Manifest.GetNamespace() + resourceResult.ResourceName = r.Manifest.GetName() + + // Apply the resource + applyResult, err := c.ApplyResource(ctx, r.Manifest, r.Existing, r.Options) + if err != nil { + resourceResult.Error = err + result.Results = append(result.Results, resourceResult) + result.FailedCount++ + return result, fmt.Errorf("failed to apply resource %s: %w", r.Name, err) + } + + resourceResult.ApplyResult = applyResult + result.Results = append(result.Results, resourceResult) + result.SuccessCount++ + + c.log.Infof(ctx, "Applied resource %s: operation=%s", r.Name, applyResult.Operation) + } + + c.log.Infof(ctx, "Applied %d resources successfully", result.SuccessCount) + return result, nil +} + +// recreateResource deletes and recreates a Kubernetes resource. +// It waits for the resource to be fully deleted before creating the new one +// to avoid race conditions with Kubernetes asynchronous deletion. 
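+//
+// Recreation is not invoked directly by callers; it is reached through ApplyResource
+// when the generation changed and RecreateOnChange is set, e.g. (sketch):
+//
+//	result, err := client.ApplyResource(ctx, newManifest, existing,
+//		&ApplyOptions{RecreateOnChange: true})
+//	// result.Operation == manifest.OperationRecreate when an update was needed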
+func (c *Client) recreateResource( + ctx context.Context, + existing *unstructured.Unstructured, + newManifest *unstructured.Unstructured, +) (*unstructured.Unstructured, error) { + gvk := existing.GroupVersionKind() + namespace := existing.GetNamespace() + name := existing.GetName() + + // Delete the existing resource + c.log.Debugf(ctx, "Deleting resource for recreation: %s/%s", gvk.Kind, name) + if err := c.DeleteResource(ctx, gvk, namespace, name); err != nil { + return nil, fmt.Errorf("failed to delete resource for recreation: %w", err) + } + + // Wait for the resource to be fully deleted + c.log.Debugf(ctx, "Waiting for resource deletion to complete: %s/%s", gvk.Kind, name) + if err := c.waitForDeletion(ctx, gvk, namespace, name); err != nil { + return nil, fmt.Errorf("failed waiting for resource deletion: %w", err) + } + + // Create the new resource + c.log.Debugf(ctx, "Creating new resource after deletion confirmed: %s/%s", gvk.Kind, name) + return c.CreateResource(ctx, newManifest) +} + +// waitForDeletion polls until the resource is confirmed deleted or context times out. +// Returns nil when the resource is confirmed gone (NotFound), or an error otherwise. +func (c *Client) waitForDeletion( + ctx context.Context, + gvk schema.GroupVersionKind, + namespace, name string, +) error { + const pollInterval = 100 * time.Millisecond + + ticker := time.NewTicker(pollInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + c.log.Warnf(ctx, "Context cancelled/timed out while waiting for deletion of %s/%s", gvk.Kind, name) + return fmt.Errorf("context cancelled while waiting for resource deletion: %w", ctx.Err()) + case <-ticker.C: + _, err := c.GetResource(ctx, gvk, namespace, name) + if err != nil { + // NotFound means the resource is deleted - this is success + if apierrors.IsNotFound(err) { + c.log.Debugf(ctx, "Resource deletion confirmed: %s/%s", gvk.Kind, name) + return nil + } + // Any other error is unexpected + c.log.Errorf(ctx, "Error checking deletion status for %s/%s: %v", gvk.Kind, name, err) + return fmt.Errorf("error checking deletion status: %w", err) + } + // Resource still exists, continue polling + c.log.Debugf(ctx, "Resource %s/%s still exists, waiting for deletion...", gvk.Kind, name) + } + } +} diff --git a/internal/k8s_client/discovery.go b/internal/k8s_client/discovery.go index 00ff9cd..b1a191e 100644 --- a/internal/k8s_client/discovery.go +++ b/internal/k8s_client/discovery.go @@ -2,64 +2,20 @@ package k8s_client import ( "context" - "sort" - "strings" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/manifest" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" ) -// Discovery defines the interface for resource discovery configuration. -// Any type implementing this interface can be used with Client.DiscoverResources(). -type Discovery interface { - // GetNamespace returns the namespace to search in. - // Empty string means cluster-scoped or all namespaces. - GetNamespace() string +// DiscoveryConfig is an alias to manifest.DiscoveryConfig for convenience. +// This allows k8s_client users to use k8s_client.DiscoveryConfig without importing manifest package. +type DiscoveryConfig = manifest.DiscoveryConfig - // GetName returns the resource name for single-resource discovery. - // Empty string means use selector-based discovery. - GetName() string - - // GetLabelSelector returns the label selector string (e.g., "app=myapp,env=prod"). - // Empty string means no label filtering. 
- GetLabelSelector() string - - // IsSingleResource returns true if discovering by name (single resource). - IsSingleResource() bool -} - -// DiscoveryConfig is the default implementation of the Discovery interface. -type DiscoveryConfig struct { - // Namespace to search in (empty for cluster-scoped or all namespaces) - Namespace string - - // ByName specifies the resource name for single-resource discovery. - // If set, GetResource is used instead of ListResources. - ByName string - - // LabelSelector is the label selector string (e.g., "app=myapp,env=prod") - LabelSelector string -} - -// GetNamespace implements Discovery.GetNamespace -func (d *DiscoveryConfig) GetNamespace() string { - return d.Namespace -} - -// GetName implements Discovery.GetName -func (d *DiscoveryConfig) GetName() string { - return d.ByName -} - -// GetLabelSelector implements Discovery.GetLabelSelector -func (d *DiscoveryConfig) GetLabelSelector() string { - return d.LabelSelector -} - -// IsSingleResource implements Discovery.IsSingleResource -func (d *DiscoveryConfig) IsSingleResource() bool { - return d.ByName != "" -} +// BuildLabelSelector is an alias to manifest.BuildLabelSelector for convenience. +// Converts a map of labels to a selector string. +// Example: {"env": "prod", "app": "myapp"} -> "app=myapp,env=prod" +var BuildLabelSelector = manifest.BuildLabelSelector // DiscoverResources discovers Kubernetes resources based on the Discovery configuration. // @@ -73,7 +29,7 @@ func (d *DiscoveryConfig) IsSingleResource() bool { // LabelSelector: "app=myapp", // } // list, err := client.DiscoverResources(ctx, gvk, discovery) -func (c *Client) DiscoverResources(ctx context.Context, gvk schema.GroupVersionKind, discovery Discovery) (*unstructured.UnstructuredList, error) { +func (c *Client) DiscoverResources(ctx context.Context, gvk schema.GroupVersionKind, discovery manifest.Discovery) (*unstructured.UnstructuredList, error) { list := &unstructured.UnstructuredList{} list.SetGroupVersionKind(gvk) if discovery == nil { @@ -98,25 +54,3 @@ func (c *Client) DiscoverResources(ctx context.Context, gvk schema.GroupVersionK // List resources by selector return c.ListResources(ctx, gvk, discovery.GetNamespace(), discovery.GetLabelSelector()) } - -// BuildLabelSelector converts a map of labels to a selector string. -// Keys are sorted alphabetically for deterministic output. -// Example: {"env": "prod", "app": "myapp"} -> "app=myapp,env=prod" -func BuildLabelSelector(labels map[string]string) string { - if len(labels) == 0 { - return "" - } - - // Sort keys for deterministic output - keys := make([]string, 0, len(labels)) - for k := range labels { - keys = append(keys, k) - } - sort.Strings(keys) - - pairs := make([]string, 0, len(labels)) - for _, k := range keys { - pairs = append(pairs, k+"="+labels[k]) - } - return strings.Join(pairs, ",") -} diff --git a/internal/k8s_client/interface.go b/internal/k8s_client/interface.go index a8d0a04..8cd65bb 100644 --- a/internal/k8s_client/interface.go +++ b/internal/k8s_client/interface.go @@ -3,6 +3,7 @@ package k8s_client import ( "context" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/resource_applier" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -10,12 +11,25 @@ import ( // K8sClient defines the interface for Kubernetes operations. // This interface allows for easy mocking in unit tests without requiring // a real Kubernetes cluster or DryRun mode. 
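+//
+// For example (illustrative; field values are hypothetical), a unit test can stub
+// this interface with MockK8sClient instead of talking to a real cluster:
+//
+//	mock := &MockK8sClient{
+//		ApplyResourceError: fmt.Errorf("simulated apply failure"),
+//	}
+//	var client K8sClient = mock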
+// +// K8sClient embeds ResourceApplier, providing unified resource apply operations +// that work across different backends (direct K8s, Maestro, etc.). type K8sClient interface { - // Resource CRUD operations - - // GetResource retrieves a single Kubernetes resource by GVK, namespace, and name. - // Returns the resource or an error if not found. - GetResource(ctx context.Context, gvk schema.GroupVersionKind, namespace, name string) (*unstructured.Unstructured, error) + // Embed ResourceApplier for unified apply operations + // This provides: ApplyResources, GetResource, DiscoverResources + resource_applier.ResourceApplier + + // ApplyResource creates or updates a single Kubernetes resource based on generation comparison. + // This is a K8sClient-specific convenience method for single resource operations. + // + // If the resource doesn't exist, it creates it. + // If it exists and the generation differs, it updates (or recreates if RecreateOnChange=true). + // If it exists and the generation matches, it skips the update (idempotent). + // + // The manifest must have the hyperfleet.io/generation annotation set. + ApplyResource(ctx context.Context, newManifest *unstructured.Unstructured, existing *unstructured.Unstructured, opts *ApplyOptions) (*ApplyResult, error) + + // Resource CRUD operations (additional to ResourceApplier) // CreateResource creates a new Kubernetes resource. // Returns the created resource with server-generated fields populated. @@ -28,13 +42,6 @@ type K8sClient interface { // DeleteResource deletes a Kubernetes resource by GVK, namespace, and name. DeleteResource(ctx context.Context, gvk schema.GroupVersionKind, namespace, name string) error - // Discovery operations - - // DiscoverResources discovers Kubernetes resources based on the Discovery configuration. - // If Discovery.IsSingleResource() is true, it fetches a single resource by name. - // Otherwise, it lists resources matching the label selector. - DiscoverResources(ctx context.Context, gvk schema.GroupVersionKind, discovery Discovery) (*unstructured.UnstructuredList, error) - // Data extraction operations // ExtractFromSecret extracts a value from a Kubernetes Secret. 
@@ -48,3 +55,6 @@ type K8sClient interface { // Ensure Client implements K8sClient interface var _ K8sClient = (*Client)(nil) + +// Ensure Client implements ResourceApplier interface +var _ resource_applier.ResourceApplier = (*Client)(nil) diff --git a/internal/k8s_client/mock.go b/internal/k8s_client/mock.go index f148b2a..bd053e1 100644 --- a/internal/k8s_client/mock.go +++ b/internal/k8s_client/mock.go @@ -3,6 +3,7 @@ package k8s_client import ( "context" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/manifest" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -22,6 +23,10 @@ type MockK8sClient struct { UpdateResourceResult *unstructured.Unstructured UpdateResourceError error DeleteResourceError error + ApplyResourceResult *ApplyResult + ApplyResourceError error + ApplyResourcesResult *ApplyResourcesResult + ApplyResourcesError error DiscoverResult *unstructured.UnstructuredList DiscoverError error ExtractSecretResult string @@ -97,8 +102,58 @@ func (m *MockK8sClient) DeleteResource(ctx context.Context, gvk schema.GroupVers return nil } +// ApplyResource implements K8sClient.ApplyResource +func (m *MockK8sClient) ApplyResource(ctx context.Context, newManifest *unstructured.Unstructured, existing *unstructured.Unstructured, opts *ApplyOptions) (*ApplyResult, error) { + if m.ApplyResourceError != nil { + return nil, m.ApplyResourceError + } + if m.ApplyResourceResult != nil { + return m.ApplyResourceResult, nil + } + // Default behavior: store the resource and return create result + key := newManifest.GetNamespace() + "/" + newManifest.GetName() + m.Resources[key] = newManifest + return &ApplyResult{ + Resource: newManifest, + Operation: manifest.OperationCreate, + Reason: "mock apply", + }, nil +} + +// ApplyResources implements K8sClient.ApplyResources +func (m *MockK8sClient) ApplyResources(ctx context.Context, resources []ResourceToApply) (*ApplyResourcesResult, error) { + if m.ApplyResourcesError != nil { + return nil, m.ApplyResourcesError + } + if m.ApplyResourcesResult != nil { + return m.ApplyResourcesResult, nil + } + // Default behavior: apply each resource using ApplyResource + result := &ApplyResourcesResult{ + Results: make([]*ResourceApplyResult, 0, len(resources)), + } + for _, r := range resources { + applyResult, err := m.ApplyResource(ctx, r.Manifest, r.Existing, r.Options) + resourceResult := &ResourceApplyResult{ + Name: r.Name, + Kind: r.Manifest.GetKind(), + Namespace: r.Manifest.GetNamespace(), + ResourceName: r.Manifest.GetName(), + ApplyResult: applyResult, + Error: err, + } + result.Results = append(result.Results, resourceResult) + if err != nil { + result.FailedCount++ + return result, err + } + result.SuccessCount++ + } + return result, nil +} + // DiscoverResources implements K8sClient.DiscoverResources -func (m *MockK8sClient) DiscoverResources(ctx context.Context, gvk schema.GroupVersionKind, discovery Discovery) (*unstructured.UnstructuredList, error) { +func (m *MockK8sClient) DiscoverResources(ctx context.Context, gvk schema.GroupVersionKind, discovery manifest.Discovery) (*unstructured.UnstructuredList, error) { if m.DiscoverError != nil { return nil, m.DiscoverError } diff --git a/internal/maestro_client/operations.go b/internal/maestro_client/operations.go index 768e362..bc6ce94 100644 --- a/internal/maestro_client/operations.go +++ b/internal/maestro_client/operations.go @@ -4,11 +4,12 @@ import ( "context" "encoding/json" - 
"github.com/openshift-hyperfleet/hyperfleet-adapter/internal/generation" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/manifest" apperrors "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/errors" "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" kubetypes "k8s.io/apimachinery/pkg/types" workv1 "open-cluster-management.io/api/work/v1" ) @@ -37,14 +38,14 @@ func (c *Client) CreateManifestWork( } // Validate that generation annotations are present (required on ManifestWork and all manifests) - if err := generation.ValidateManifestWorkGeneration(work); err != nil { + if err := manifest.ValidateManifestWorkGeneration(work); err != nil { return nil, apperrors.MaestroError("invalid ManifestWork: %v", err) } // Enrich context with common fields ctx = logger.WithMaestroConsumer(ctx, consumerName) ctx = logger.WithLogField(ctx, "manifestwork", work.Name) - ctx = logger.WithObservedGeneration(ctx, generation.GetGeneration(work.ObjectMeta)) + ctx = logger.WithObservedGeneration(ctx, manifest.GetGeneration(work.ObjectMeta)) c.log.WithFields(map[string]interface{}{ "manifests": len(work.Spec.Workload.Manifests), @@ -197,12 +198,12 @@ func (c *Client) ApplyManifestWork( } // Validate that generation annotations are present (required on ManifestWork and all manifests) - if err := generation.ValidateManifestWorkGeneration(manifestWork); err != nil { + if err := manifest.ValidateManifestWorkGeneration(manifestWork); err != nil { return nil, apperrors.MaestroError("invalid ManifestWork: %v", err) } // Get generation from the work (set by template) - newGeneration := generation.GetGeneration(manifestWork.ObjectMeta) + newGeneration := manifest.GetGeneration(manifestWork.ObjectMeta) // Enrich context with common fields ctx = logger.WithMaestroConsumer(ctx, consumerName) @@ -221,11 +222,11 @@ func (c *Client) ApplyManifestWork( // Get existing generation (0 if not found) var existingGeneration int64 if exists { - existingGeneration = generation.GetGeneration(existing.ObjectMeta) + existingGeneration = manifest.GetGeneration(existing.ObjectMeta) } // Compare generations to determine operation - decision := generation.CompareGenerations(newGeneration, existingGeneration, exists) + decision := manifest.CompareGenerations(newGeneration, existingGeneration, exists) c.log.WithFields(map[string]interface{}{ "operation": decision.Operation, @@ -234,11 +235,11 @@ func (c *Client) ApplyManifestWork( // Execute operation based on comparison result switch decision.Operation { - case generation.OperationCreate: + case manifest.OperationCreate: return c.CreateManifestWork(ctx, consumerName, manifestWork) - case generation.OperationSkip: + case manifest.OperationSkip: return existing, nil - case generation.OperationUpdate: + case manifest.OperationUpdate: // Use Patch instead of Update since Maestro gRPC doesn't support Update patchData, err := createManifestWorkPatch(manifestWork) if err != nil { @@ -262,3 +263,71 @@ func createManifestWorkPatch(work *workv1.ManifestWork) ([]byte, error) { } return json.Marshal(patch) } + +// DiscoverManifest finds manifests within a ManifestWork that match the discovery criteria. +// This is the maestro_client equivalent of k8s_client.DiscoverResources. 
+//
+// Parameters:
+//   - ctx: Context for the operation
+//   - consumerName: The target cluster name (Maestro consumer)
+//   - workName: Name of the ManifestWork to search within
+//   - discovery: Discovery configuration (namespace, name, or label selector)
+//
+// Returns:
+//   - List of matching manifests as unstructured objects
+//   - Error if ManifestWork not found or discovery fails
+//
+// Example:
+//
+//	discovery := &manifest.DiscoveryConfig{
+//		Namespace:     "default",
+//		LabelSelector: "app=myapp",
+//	}
+//	list, err := client.DiscoverManifest(ctx, "cluster-1", "my-work", discovery)
+func (c *Client) DiscoverManifest(
+	ctx context.Context,
+	consumerName string,
+	workName string,
+	discovery manifest.Discovery,
+) (*unstructured.UnstructuredList, error) {
+	ctx = logger.WithMaestroConsumer(ctx, consumerName)
+	ctx = logger.WithLogField(ctx, "manifestwork", workName)
+
+	c.log.Debug(ctx, "Discovering manifests in ManifestWork")
+
+	// Get the ManifestWork
+	work, err := c.GetManifestWork(ctx, consumerName, workName)
+	if err != nil {
+		return nil, err
+	}
+
+	// Use shared discovery logic from manifest package
+	list, err := manifest.DiscoverInManifestWork(work, discovery)
+	if err != nil {
+		return nil, apperrors.MaestroError("failed to discover manifests in %s/%s: %v",
+			consumerName, workName, err)
+	}
+
+	c.log.WithFields(map[string]interface{}{
+		"found": len(list.Items),
+	}).Debug(ctx, "Discovered manifests in ManifestWork")
+
+	return list, nil
+}
+
+// DiscoverManifestInWork finds manifests within an already-fetched ManifestWork.
+// Use this when you already have the ManifestWork object and don't need to fetch it.
+//
+// Parameters:
+//   - work: The ManifestWork to search within
+//   - discovery: Discovery configuration (namespace, name, or label selector)
+//
+// Returns:
+//   - List of matching manifests as unstructured objects
+//
+// If multiple manifests match, use manifest.GetLatestGenerationFromList to select
+// the one with the highest generation.
+func (c *Client) DiscoverManifestInWork(
+	work *workv1.ManifestWork,
+	discovery manifest.Discovery,
+) (*unstructured.UnstructuredList, error) {
+	return manifest.DiscoverInManifestWork(work, discovery)
+}
diff --git a/internal/maestro_client/operations_test.go b/internal/maestro_client/operations_test.go
index c8d624b..29aeef8 100644
--- a/internal/maestro_client/operations_test.go
+++ b/internal/maestro_client/operations_test.go
@@ -1,14 +1,14 @@
 // Package maestro_client tests
 //
-// Note: Tests for generation.ValidateGeneration, generation.ValidateGenerationFromUnstructured,
-// and generation.ValidateManifestWorkGeneration are in internal/generation/generation_test.go.
+// Note: Tests for manifest.ValidateGeneration, manifest.ValidateGenerationFromUnstructured,
+// and manifest.ValidateManifestWorkGeneration are in internal/manifest/generation_test.go.
+// This file contains tests specific to maestro_client functionality.
package maestro_client import ( "testing" - "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/generation" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/manifest" "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/constants" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" workv1 "open-cluster-management.io/api/work/v1" @@ -73,7 +73,7 @@ func TestGetGenerationFromManifestWork(t *testing.T) { if tt.work == nil { result = 0 } else { - result = generation.GetGeneration(tt.work.ObjectMeta) + result = manifest.GetGeneration(tt.work.ObjectMeta) } if result != tt.expected { t.Errorf("expected generation %d, got %d", tt.expected, result) diff --git a/internal/maestro_client/resource_applier.go b/internal/maestro_client/resource_applier.go new file mode 100644 index 0000000..ca8a1d0 --- /dev/null +++ b/internal/maestro_client/resource_applier.go @@ -0,0 +1,295 @@ +package maestro_client + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/manifest" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/resource_applier" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/constants" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + workv1 "open-cluster-management.io/api/work/v1" +) + +// ResourceApplier adapts maestro_client.Client to implement resource_applier.ResourceApplier. +// This allows the executor to use either direct K8s or Maestro/ManifestWork as the backend. +// +// The adapter: +// - Stores all resources in a single ManifestWork +// - Translates individual resource operations to ManifestWork operations +// - Uses the ManifestWork's generation tracking for idempotent updates +type ResourceApplier struct { + client *Client + consumerName string // Target cluster name (Maestro consumer) + workName string // Name of the ManifestWork to manage + workLabels map[string]string // Labels to apply to the ManifestWork + log logger.Logger +} + +// ResourceApplierConfig configures the MaestroResourceApplier. +type ResourceApplierConfig struct { + // ConsumerName is the target cluster name (required) + ConsumerName string + + // WorkName is the name of the ManifestWork to create/update (required) + WorkName string + + // WorkLabels are optional labels to add to the ManifestWork + WorkLabels map[string]string +} + +// NewResourceApplier creates a new MaestroResourceApplier. 
+// +// Parameters: +// - client: The underlying Maestro client +// - config: Configuration including consumer name and work name +// - log: Logger instance +// +// Example: +// +// applier := NewResourceApplier(maestroClient, &ResourceApplierConfig{ +// ConsumerName: "cluster-1", +// WorkName: "my-adapter-resources", +// WorkLabels: map[string]string{"managed-by": "hyperfleet"}, +// }, log) +func NewResourceApplier(client *Client, config *ResourceApplierConfig, log logger.Logger) (*ResourceApplier, error) { + if client == nil { + return nil, fmt.Errorf("maestro client is required") + } + if config == nil { + return nil, fmt.Errorf("config is required") + } + if config.ConsumerName == "" { + return nil, fmt.Errorf("consumer name is required") + } + if config.WorkName == "" { + return nil, fmt.Errorf("work name is required") + } + + return &ResourceApplier{ + client: client, + consumerName: config.ConsumerName, + workName: config.WorkName, + workLabels: config.WorkLabels, + log: log, + }, nil +} + +// ApplyResources applies multiple resources by bundling them into a ManifestWork. +// All resources are stored in a single ManifestWork for the target cluster. +// +// The operation: +// 1. Builds a ManifestWork with all resources as manifests +// 2. Uses ApplyManifestWork for generation-aware create/update +// 3. Returns results for each resource +func (a *ResourceApplier) ApplyResources( + ctx context.Context, + resources []resource_applier.ResourceToApply, +) (*resource_applier.ApplyResourcesResult, error) { + result := &resource_applier.ApplyResourcesResult{ + Results: make([]*resource_applier.ResourceApplyResult, 0, len(resources)), + } + + if len(resources) == 0 { + return result, nil + } + + // Build ManifestWork from resources + work, err := a.buildManifestWork(resources) + if err != nil { + return nil, fmt.Errorf("failed to build ManifestWork: %w", err) + } + + a.log.Infof(ctx, "Applying %d resources via ManifestWork %s/%s", + len(resources), a.consumerName, a.workName) + + // Apply the ManifestWork (create or update) + appliedWork, err := a.client.ApplyManifestWork(ctx, a.consumerName, work) + if err != nil { + // Convert to result with error + for _, r := range resources { + resourceResult := &resource_applier.ResourceApplyResult{ + Name: r.Name, + Kind: r.Manifest.GetKind(), + Namespace: r.Manifest.GetNamespace(), + ResourceName: r.Manifest.GetName(), + Error: err, + } + result.Results = append(result.Results, resourceResult) + result.FailedCount++ + } + return result, fmt.Errorf("failed to apply ManifestWork: %w", err) + } + + // Determine operation based on ManifestWork result + // Since all resources are in one ManifestWork, they share the same operation + op := a.determineOperation(appliedWork) + + // Build success results for all resources + for _, r := range resources { + resourceResult := &resource_applier.ResourceApplyResult{ + Name: r.Name, + Kind: r.Manifest.GetKind(), + Namespace: r.Manifest.GetNamespace(), + ResourceName: r.Manifest.GetName(), + ApplyResult: &resource_applier.ApplyResult{ + Resource: r.Manifest, // Return the manifest as the "result" + Operation: op, + Reason: fmt.Sprintf("applied via ManifestWork %s", a.workName), + }, + } + result.Results = append(result.Results, resourceResult) + result.SuccessCount++ + } + + a.log.Infof(ctx, "Successfully applied %d resources via ManifestWork", result.SuccessCount) + return result, nil +} + +// GetResource retrieves a resource from the ManifestWork's manifest list. 
+// It searches the manifests stored in the ManifestWork for a matching resource. +func (a *ResourceApplier) GetResource( + ctx context.Context, + gvk schema.GroupVersionKind, + namespace, name string, +) (*unstructured.Unstructured, error) { + // Get the ManifestWork + work, err := a.client.GetManifestWork(ctx, a.consumerName, a.workName) + if err != nil { + if apierrors.IsNotFound(err) { + // ManifestWork doesn't exist, so resource doesn't exist + gr := schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind} + return nil, apierrors.NewNotFound(gr, name) + } + return nil, err + } + + // Search for the resource in the manifests + for _, m := range work.Spec.Workload.Manifests { + obj, err := manifestToUnstructured(m) + if err != nil { + continue + } + + // Check if this manifest matches + if obj.GetKind() == gvk.Kind && + obj.GetAPIVersion() == gvk.GroupVersion().String() && + obj.GetNamespace() == namespace && + obj.GetName() == name { + return obj, nil + } + } + + // Resource not found in ManifestWork + gr := schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind} + return nil, apierrors.NewNotFound(gr, name) +} + +// DiscoverResources discovers resources within the ManifestWork that match the discovery criteria. +func (a *ResourceApplier) DiscoverResources( + ctx context.Context, + gvk schema.GroupVersionKind, + discovery manifest.Discovery, +) (*unstructured.UnstructuredList, error) { + return a.client.DiscoverManifest(ctx, a.consumerName, a.workName, discovery) +} + +// buildManifestWork creates a ManifestWork containing all resources as manifests. +func (a *ResourceApplier) buildManifestWork(resources []resource_applier.ResourceToApply) (*workv1.ManifestWork, error) { + manifests := make([]workv1.Manifest, 0, len(resources)) + + // Find the highest generation among all resources for the ManifestWork + var maxGeneration int64 + for _, r := range resources { + gen := manifest.GetGenerationFromUnstructured(r.Manifest) + if gen > maxGeneration { + maxGeneration = gen + } + } + + // Convert each resource to a Manifest + for _, r := range resources { + raw, err := json.Marshal(r.Manifest.Object) + if err != nil { + return nil, fmt.Errorf("failed to marshal manifest %s: %w", r.Name, err) + } + manifests = append(manifests, workv1.Manifest{ + RawExtension: runtime.RawExtension{Raw: raw}, + }) + } + + // Build the ManifestWork + work := &workv1.ManifestWork{} + work.Name = a.workName + work.Namespace = a.consumerName + + // Set labels + if a.workLabels != nil { + work.Labels = a.workLabels + } + + // Set generation annotation on ManifestWork + if work.Annotations == nil { + work.Annotations = make(map[string]string) + } + work.Annotations[constants.AnnotationGeneration] = fmt.Sprintf("%d", maxGeneration) + + // Set manifests + work.Spec.Workload.Manifests = manifests + + return work, nil +} + +// determineOperation determines the operation that was performed based on the ManifestWork. +func (a *ResourceApplier) determineOperation(work *workv1.ManifestWork) manifest.Operation { + if work == nil { + return manifest.OperationCreate + } + + // Check if this is a new ManifestWork (no resourceVersion set by server) + // or an updated one + if work.ResourceVersion == "" { + return manifest.OperationCreate + } + + // If the generation matches what we set, it could be create or update + // The actual determination was done in ApplyManifestWork + return manifest.OperationUpdate +} + +// manifestToUnstructured converts a workv1.Manifest to an unstructured object. 
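+// For example (sketch):
+//
+//	obj, err := manifestToUnstructured(work.Spec.Workload.Manifests[0])
+//	if err == nil {
+//		_ = obj.GetName() // decoded manifest behaves like any unstructured object
+//	}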
+func manifestToUnstructured(m workv1.Manifest) (*unstructured.Unstructured, error) { + if m.Raw == nil { + return nil, fmt.Errorf("manifest has no raw data") + } + + var obj map[string]interface{} + if err := json.Unmarshal(m.Raw, &obj); err != nil { + return nil, fmt.Errorf("failed to unmarshal manifest: %w", err) + } + + return &unstructured.Unstructured{Object: obj}, nil +} + +// Ensure ResourceApplier implements resource_applier.ResourceApplier +var _ resource_applier.ResourceApplier = (*ResourceApplier)(nil) + +// ConsumerName returns the target cluster name. +func (a *ResourceApplier) ConsumerName() string { + return a.consumerName +} + +// WorkName returns the ManifestWork name. +func (a *ResourceApplier) WorkName() string { + return a.workName +} + +// Client returns the underlying Maestro client. +func (a *ResourceApplier) Client() *Client { + return a.client +} diff --git a/internal/generation/generation.go b/internal/manifest/generation.go similarity index 60% rename from internal/generation/generation.go rename to internal/manifest/generation.go index 6814f2b..c39fcb6 100644 --- a/internal/generation/generation.go +++ b/internal/manifest/generation.go @@ -1,13 +1,19 @@ -// Package generation provides utilities for generation-based resource tracking. +// Package manifest provides utilities for Kubernetes manifest validation, generation tracking, and discovery. // -// This package handles generation annotation validation, comparison, and extraction -// for both k8s_client (Kubernetes resources) and maestro_client (ManifestWork). -package generation +// This package handles: +// - Manifest validation (apiVersion, kind, name, generation annotation) +// - Generation annotation extraction and comparison +// - ManifestWork validation for OCM +// - Discovery interface for finding resources/manifests +// +// Used by both k8s_client (Kubernetes resources) and maestro_client (ManifestWork). +package manifest import ( "fmt" "sort" "strconv" + "strings" "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/constants" apperrors "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/errors" @@ -232,6 +238,16 @@ func ValidateGenerationFromUnstructured(obj *unstructured.Unstructured) error { return nil } +// ValidateManifest validates a Kubernetes manifest has all required fields and annotations. +// Returns error if: +// - Object is nil +// - apiVersion is missing +// - kind is missing +// - metadata.name is missing +// - generation annotation is missing or invalid +// +// This is used by both k8s_client (for direct K8s resources) and maestro_client (for ManifestWork payloads). + // GetLatestGenerationFromList returns the resource with the highest generation annotation from a list. // It sorts by generation annotation (descending) and uses metadata.name as a secondary sort key // for deterministic behavior when generations are equal. @@ -261,3 +277,159 @@ func GetLatestGenerationFromList(list *unstructured.UnstructuredList) *unstructu return &items[0] } + +// ============================================================================= +// Discovery Interface and Configuration +// ============================================================================= + +// Discovery defines the interface for resource/manifest discovery configuration. +// This interface is used by both k8s_client (for K8s resources) and maestro_client (for ManifestWork manifests). +type Discovery interface { + // GetNamespace returns the namespace to search in. 
+ // Empty string means cluster-scoped or all namespaces. + GetNamespace() string + + // GetName returns the resource name for single-resource discovery. + // Empty string means use selector-based discovery. + GetName() string + + // GetLabelSelector returns the label selector string (e.g., "app=myapp,env=prod"). + // Empty string means no label filtering. + GetLabelSelector() string + + // IsSingleResource returns true if discovering by name (single resource). + IsSingleResource() bool +} + +// DiscoveryConfig is the default implementation of the Discovery interface. +// Used by both k8s_client and maestro_client for consistent discovery configuration. +type DiscoveryConfig struct { + // Namespace to search in (empty for cluster-scoped or all namespaces) + Namespace string + + // ByName specifies the resource name for single-resource discovery. + // If set, discovery returns a single resource by name. + ByName string + + // LabelSelector is the label selector string (e.g., "app=myapp,env=prod") + LabelSelector string +} + +// GetNamespace implements Discovery.GetNamespace +func (d *DiscoveryConfig) GetNamespace() string { + return d.Namespace +} + +// GetName implements Discovery.GetName +func (d *DiscoveryConfig) GetName() string { + return d.ByName +} + +// GetLabelSelector implements Discovery.GetLabelSelector +func (d *DiscoveryConfig) GetLabelSelector() string { + return d.LabelSelector +} + +// IsSingleResource implements Discovery.IsSingleResource +func (d *DiscoveryConfig) IsSingleResource() bool { + return d.ByName != "" +} + +// BuildLabelSelector converts a map of labels to a selector string. +// Keys are sorted alphabetically for deterministic output. +// Example: {"env": "prod", "app": "myapp"} -> "app=myapp,env=prod" +func BuildLabelSelector(labels map[string]string) string { + if len(labels) == 0 { + return "" + } + + // Sort keys for deterministic output + keys := make([]string, 0, len(labels)) + for k := range labels { + keys = append(keys, k) + } + sort.Strings(keys) + + pairs := make([]string, 0, len(labels)) + for _, k := range keys { + pairs = append(pairs, k+"="+labels[k]) + } + return strings.Join(pairs, ",") +} + +// MatchesLabels checks if an object's labels match the given label selector. +// Returns true if all selector labels are present in the object's labels. +func MatchesLabels(obj *unstructured.Unstructured, labelSelector string) bool { + if labelSelector == "" { + return true + } + + objLabels := obj.GetLabels() + if objLabels == nil { + return false + } + + // Parse selector string (e.g., "app=myapp,env=prod") + pairs := strings.Split(labelSelector, ",") + for _, pair := range pairs { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + continue + } + key, value := kv[0], kv[1] + if objLabels[key] != value { + return false + } + } + + return true +} + +// DiscoverInManifestWork finds manifests within a ManifestWork that match the discovery criteria. +// This is used by maestro_client to find specific manifests within a ManifestWork's workload. 
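+// For example (sketch; names are illustrative):
+//
+//	list, err := DiscoverInManifestWork(work, &DiscoveryConfig{
+//		Namespace:     "default",
+//		LabelSelector: "app=myapp",
+//	})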
+// +// Parameters: +// - work: The ManifestWork containing manifests to search +// - discovery: Discovery configuration (namespace, name, or label selector) +// +// Returns: +// - List of matching manifests as unstructured objects +// - The manifest with the highest generation if multiple match +func DiscoverInManifestWork(work *workv1.ManifestWork, discovery Discovery) (*unstructured.UnstructuredList, error) { + list := &unstructured.UnstructuredList{} + + if work == nil || discovery == nil { + return list, nil + } + + for i, m := range work.Spec.Workload.Manifests { + obj := &unstructured.Unstructured{} + if err := obj.UnmarshalJSON(m.Raw); err != nil { + return nil, apperrors.Validation("ManifestWork %q manifest[%d]: failed to unmarshal: %v", + work.Name, i, err) + } + + // Check if manifest matches discovery criteria + if matchesDiscovery(obj, discovery) { + list.Items = append(list.Items, *obj) + } + } + + return list, nil +} + +// matchesDiscovery checks if a manifest matches the discovery criteria +func matchesDiscovery(obj *unstructured.Unstructured, discovery Discovery) bool { + // Check namespace if specified + if ns := discovery.GetNamespace(); ns != "" && obj.GetNamespace() != ns { + return false + } + + // Check name if single resource discovery + if discovery.IsSingleResource() { + return obj.GetName() == discovery.GetName() + } + + // Check label selector + return MatchesLabels(obj, discovery.GetLabelSelector()) +} diff --git a/internal/generation/generation_test.go b/internal/manifest/generation_test.go similarity index 99% rename from internal/generation/generation_test.go rename to internal/manifest/generation_test.go index 7493878..d276a80 100644 --- a/internal/generation/generation_test.go +++ b/internal/manifest/generation_test.go @@ -1,4 +1,4 @@ -package generation +package manifest import ( "testing" diff --git a/internal/manifest/manifest.go b/internal/manifest/manifest.go new file mode 100644 index 0000000..e913961 --- /dev/null +++ b/internal/manifest/manifest.go @@ -0,0 +1,31 @@ +package manifest + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + apperrors "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/errors" +) + +func ValidateManifest(obj *unstructured.Unstructured) error { + if obj == nil { + return apperrors.Validation("manifest cannot be nil").AsError() + } + + // Validate required Kubernetes fields + if obj.GetAPIVersion() == "" { + return apperrors.Validation("manifest missing apiVersion").AsError() + } + if obj.GetKind() == "" { + return apperrors.Validation("manifest missing kind").AsError() + } + if obj.GetName() == "" { + return apperrors.Validation("manifest missing metadata.name").AsError() + } + + // Validate required generation annotation + if err := ValidateGenerationFromUnstructured(obj); err != nil { + return err + } + + return nil +} diff --git a/internal/manifest/render.go b/internal/manifest/render.go new file mode 100644 index 0000000..bc3c8ea --- /dev/null +++ b/internal/manifest/render.go @@ -0,0 +1,61 @@ +// Package manifest provides utilities for Kubernetes manifest rendering and processing. +package manifest + +import ( + "fmt" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/utils" +) + +// RenderManifestData recursively renders all template strings in a manifest data map. +// Both keys and values are rendered if they contain template expressions. 
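+//
+// For example (sketch; assumes params are exposed as the template's dot):
+//
+//	data := map[string]interface{}{"name": "cluster-{{ .clusterID }}"}
+//	rendered, err := RenderManifestData(data, map[string]interface{}{"clusterID": "abc123"})
+//	// rendered["name"] == "cluster-abc123" if the template renders as expected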
+// +// Parameters: +// - data: The manifest data as a map +// - params: The parameters to use for template rendering +// +// Returns the rendered manifest data or an error if any template fails to render. +func RenderManifestData(data map[string]interface{}, params map[string]interface{}) (map[string]interface{}, error) { + result := make(map[string]interface{}) + + for k, v := range data { + renderedKey, err := utils.RenderTemplate(k, params) + if err != nil { + return nil, fmt.Errorf("failed to render key '%s': %w", k, err) + } + + renderedValue, err := renderValue(v, params) + if err != nil { + return nil, fmt.Errorf("failed to render value for key '%s': %w", k, err) + } + + result[renderedKey] = renderedValue + } + + return result, nil +} + +// renderValue renders a value recursively, handling maps, slices, and strings. +func renderValue(v interface{}, params map[string]interface{}) (interface{}, error) { + switch val := v.(type) { + case string: + return utils.RenderTemplate(val, params) + case map[string]interface{}: + return RenderManifestData(val, params) + case map[interface{}]interface{}: + converted := utils.ConvertToStringKeyMap(val) + return RenderManifestData(converted, params) + case []interface{}: + result := make([]interface{}, len(val)) + for i, item := range val { + rendered, err := renderValue(item, params) + if err != nil { + return nil, err + } + result[i] = rendered + } + return result, nil + default: + return v, nil + } +} diff --git a/internal/resource_applier/interface.go b/internal/resource_applier/interface.go new file mode 100644 index 0000000..6d7f30b --- /dev/null +++ b/internal/resource_applier/interface.go @@ -0,0 +1,48 @@ +package resource_applier + +import ( + "context" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/manifest" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// ResourceApplier defines the interface for applying Kubernetes resources. +// This interface abstracts the underlying implementation, allowing resources +// to be applied via different backends: +// - Direct Kubernetes API (k8s_client) +// - Maestro/OCM ManifestWork (maestro_client) +// - Other backends (GitOps, Argo, etc.) +// +// All implementations must support generation-aware apply operations: +// - Create if resource doesn't exist +// - Update if generation changed +// - Skip if generation matches (idempotent) +type ResourceApplier interface { + // ApplyResources applies multiple Kubernetes resources. + // Implementation details vary by backend: + // - k8s_client: applies resources sequentially, stopping on first error + // - maestro_client: bundles all resources into a single ManifestWork + // + // Each resource in the batch can have its own ApplyOptions (e.g., RecreateOnChange). + // Results are returned for all processed resources. + // + // Parameters: + // - ctx: Context for the operation + // - resources: List of resources to apply (must have generation annotations) + // + // Returns: + // - ApplyResourcesResult containing results for all processed resources + // - Error if any resource fails (results will contain partial results up to failure) + ApplyResources(ctx context.Context, resources []ResourceToApply) (*ApplyResourcesResult, error) + + // GetResource retrieves a single Kubernetes resource by GVK, namespace, and name. + // Returns the resource or an error if not found. 
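+	//
+	// For example (sketch; variable names are illustrative):
+	//
+	//	gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
+	//	obj, err := applier.GetResource(ctx, gvk, "default", "my-deployment")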
+ GetResource(ctx context.Context, gvk schema.GroupVersionKind, namespace, name string) (*unstructured.Unstructured, error) + + // DiscoverResources discovers Kubernetes resources based on the Discovery configuration. + // If Discovery.IsSingleResource() is true, it fetches a single resource by name. + // Otherwise, it lists resources matching the label selector. + DiscoverResources(ctx context.Context, gvk schema.GroupVersionKind, discovery manifest.Discovery) (*unstructured.UnstructuredList, error) +} diff --git a/internal/resource_applier/types.go b/internal/resource_applier/types.go new file mode 100644 index 0000000..5b22033 --- /dev/null +++ b/internal/resource_applier/types.go @@ -0,0 +1,101 @@ +// Package resource_applier provides a unified interface for applying Kubernetes resources +// across different backends (direct K8s API, Maestro/OCM ManifestWork, etc.). +package resource_applier + +import ( + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/manifest" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +// ApplyOptions configures the behavior of resource apply operations. +type ApplyOptions struct { + // RecreateOnChange forces delete+create instead of update when resource exists + // and generation has changed. Useful for resources that don't support in-place updates. + RecreateOnChange bool +} + +// ApplyResult contains the result of applying a single resource. +type ApplyResult struct { + // Resource is the resulting Kubernetes resource after the operation + Resource *unstructured.Unstructured + + // Operation is the operation that was performed (create, update, recreate, skip) + Operation manifest.Operation + + // Reason explains why the operation was chosen + Reason string +} + +// ResourceToApply represents a single resource to be applied in a batch operation. +type ResourceToApply struct { + // Name is a logical name for the resource (used in results for identification) + Name string + + // Manifest is the desired resource state (must have generation annotation) + Manifest *unstructured.Unstructured + + // Existing is the current resource state (nil if not found/discovered) + Existing *unstructured.Unstructured + + // Options for this apply operation (optional, defaults to no special options) + Options *ApplyOptions +} + +// ResourceApplyResult contains the result for a single resource in a batch operation. +type ResourceApplyResult struct { + // Name is the logical name of the resource (from ResourceToApply.Name) + Name string + + // Kind is the Kubernetes resource kind + Kind string + + // Namespace is the resource namespace + Namespace string + + // ResourceName is the Kubernetes resource name (metadata.name) + ResourceName string + + // ApplyResult contains the operation result (nil if failed before apply) + *ApplyResult + + // Error is set if this resource failed to apply + Error error +} + +// ApplyResourcesResult contains the results of a batch apply operation. +type ApplyResourcesResult struct { + // Results contains individual results for each resource in order + Results []*ResourceApplyResult + + // FailedCount is the number of resources that failed + FailedCount int + + // SuccessCount is the number of resources that succeeded + SuccessCount int +} + +// Failed returns true if any resources failed to apply. +func (r *ApplyResourcesResult) Failed() bool { + return r.FailedCount > 0 +} + +// GetResult returns the result for a resource by name, or nil if not found. 
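+//
+// For example (sketch):
+//
+//	if res := result.GetResult("deployment"); res != nil && res.Error != nil {
+//		// handle the failure of this specific resource
+//	}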
+func (r *ApplyResourcesResult) GetResult(name string) *ResourceApplyResult { + for _, result := range r.Results { + if result.Name == name { + return result + } + } + return nil +} + +// GetResources returns a map of resource name to the resulting unstructured resource. +func (r *ApplyResourcesResult) GetResources() map[string]*unstructured.Unstructured { + resources := make(map[string]*unstructured.Unstructured) + for _, result := range r.Results { + if result.ApplyResult != nil && result.Resource != nil { + resources[result.Name] = result.Resource + } + } + return resources +} diff --git a/pkg/utils/convert.go b/pkg/utils/convert.go new file mode 100644 index 0000000..75f0ade --- /dev/null +++ b/pkg/utils/convert.go @@ -0,0 +1,229 @@ +// Package utils provides general-purpose utility functions. +package utils + +import ( + "fmt" + "math" + "strconv" + "strings" +) + +// ConvertToType converts a value to the specified type. +// Supported types: string, int, int64, float, float64, bool +// +// Example: +// +// val, err := ConvertToType("42", "int64") // val = int64(42) +// val, err := ConvertToType(3.14, "string") // val = "3.14" +func ConvertToType(value interface{}, targetType string) (interface{}, error) { + switch targetType { + case "string": + return ConvertToString(value) + case "int", "int64": + return ConvertToInt64(value) + case "float", "float64": + return ConvertToFloat64(value) + case "bool": + return ConvertToBool(value) + default: + return nil, fmt.Errorf("unsupported type: %s (supported: string, int, int64, float, float64, bool)", targetType) + } +} + +// ConvertToString converts a value to string. +// Handles all common types including numbers and booleans. +// +//nolint:unparam // error kept for API consistency +func ConvertToString(value interface{}) (string, error) { + switch v := value.(type) { + case string: + return v, nil + case int, int8, int16, int32, int64: + return fmt.Sprintf("%d", v), nil + case uint, uint8, uint16, uint32, uint64: + return fmt.Sprintf("%d", v), nil + case float32: + return strconv.FormatFloat(float64(v), 'f', -1, 32), nil + case float64: + return strconv.FormatFloat(v, 'f', -1, 64), nil + case bool: + return strconv.FormatBool(v), nil + default: + return fmt.Sprintf("%v", v), nil + } +} + +// ConvertToInt64 converts a value to int64. +// Handles integers, floats, strings, and booleans. 
+// +// Example: +// +// ConvertToInt64("42") // int64(42), nil +// ConvertToInt64(3.14) // int64(3), nil +// ConvertToInt64(true) // int64(1), nil +func ConvertToInt64(value interface{}) (int64, error) { + switch v := value.(type) { + case int: + return int64(v), nil + case uint64: + if v > math.MaxInt64 { + return 0, fmt.Errorf("uint64 value %d overflows int64", v) + } + return int64(v), nil + case int8: + return int64(v), nil + case int16: + return int64(v), nil + case int32: + return int64(v), nil + case int64: + return v, nil + case uint: + if v > uint(math.MaxInt64) { + return 0, fmt.Errorf("uint value %d overflows int64", v) + } + return int64(v), nil + case uint8: + return int64(v), nil + case uint16: + return int64(v), nil + case uint32: + return int64(v), nil + case float32: + return int64(v), nil + case float64: + return int64(v), nil + case string: + // Try parsing as int first + if i, err := strconv.ParseInt(v, 10, 64); err == nil { + return i, nil + } + // Try parsing as float and convert + if f, err := strconv.ParseFloat(v, 64); err == nil { + return int64(f), nil + } + return 0, fmt.Errorf("cannot convert string '%s' to int", v) + case bool: + if v { + return 1, nil + } + return 0, nil + default: + return 0, fmt.Errorf("cannot convert %T to int", value) + } +} + +// ConvertToFloat64 converts a value to float64. +// Handles integers, floats, strings, and booleans. +// +// Example: +// +// ConvertToFloat64("3.14") // float64(3.14), nil +// ConvertToFloat64(42) // float64(42), nil +// ConvertToFloat64(true) // float64(1.0), nil +func ConvertToFloat64(value interface{}) (float64, error) { + switch v := value.(type) { + case float32: + return float64(v), nil + case float64: + return v, nil + case int: + return float64(v), nil + case int8: + return float64(v), nil + case int16: + return float64(v), nil + case int32: + return float64(v), nil + case int64: + return float64(v), nil + case uint: + return float64(v), nil + case uint8: + return float64(v), nil + case uint16: + return float64(v), nil + case uint32: + return float64(v), nil + case uint64: + return float64(v), nil + case string: + f, err := strconv.ParseFloat(v, 64) + if err != nil { + return 0, fmt.Errorf("cannot convert string '%s' to float: %w", v, err) + } + return f, nil + case bool: + if v { + return 1.0, nil + } + return 0.0, nil + default: + return 0, fmt.Errorf("cannot convert %T to float", value) + } +} + +// ConvertToBool converts a value to bool. +// Handles booleans, strings (including "yes", "no", "on", "off"), and numbers. +// +// Example: +// +// ConvertToBool("yes") // true, nil +// ConvertToBool("false") // false, nil +// ConvertToBool(1) // true, nil +// ConvertToBool(0) // false, nil +func ConvertToBool(value interface{}) (bool, error) { + switch v := value.(type) { + case bool: + return v, nil + case string: + // Empty string is treated as false + if v == "" { + return false, nil + } + b, err := strconv.ParseBool(v) + if err != nil { + // Handle common truthy/falsy strings + lower := strings.ToLower(v) + switch lower { + case "yes", "y", "on", "1": + return true, nil + case "no", "n", "off", "0": + return false, nil + } + return false, fmt.Errorf("cannot convert string '%s' to bool", v) + } + return b, nil + // NOTE: Each numeric type needs its own case arm. 
In Go type switches, combined + // cases like "case int, int8, int16:" keep v as interface{}, so "v != 0" would + // compare interface{}(int8(0)) with interface{}(int(0)) - different types that + // are never equal, causing int8(0) to incorrectly return true. + // With separate arms, v is bound to the concrete type, enabling correct comparison. + case int: + return v != 0, nil + case int8: + return v != 0, nil + case int16: + return v != 0, nil + case int32: + return v != 0, nil + case int64: + return v != 0, nil + case uint: + return v != 0, nil + case uint8: + return v != 0, nil + case uint16: + return v != 0, nil + case uint32: + return v != 0, nil + case uint64: + return v != 0, nil + case float32: + return v != 0, nil + case float64: + return v != 0, nil + default: + return false, fmt.Errorf("cannot convert %T to bool", value) + } +} diff --git a/pkg/utils/env.go b/pkg/utils/env.go new file mode 100644 index 0000000..14b88e3 --- /dev/null +++ b/pkg/utils/env.go @@ -0,0 +1,25 @@ +// Package utils provides general-purpose utility functions. +package utils + +import ( + "fmt" + "os" +) + +// GetEnvOrError retrieves an environment variable value. +// Unlike os.Getenv which returns an empty string for unset variables, +// this function returns an error if the variable is not set. +// +// Example: +// +// val, err := GetEnvOrError("DATABASE_URL") +// if err != nil { +// // Variable not set +// } +func GetEnvOrError(envVar string) (string, error) { + value, exists := os.LookupEnv(envVar) + if !exists { + return "", fmt.Errorf("environment variable %s not set", envVar) + } + return value, nil +} diff --git a/pkg/utils/map.go b/pkg/utils/map.go new file mode 100644 index 0000000..82a1db0 --- /dev/null +++ b/pkg/utils/map.go @@ -0,0 +1,162 @@ +package utils + +import ( + "fmt" + + "github.com/mitchellh/copystructure" +) + +// ConvertToStringKeyMap converts map[interface{}]interface{} to map[string]interface{}. +// This is commonly needed when working with YAML data which may have interface{} keys. +func ConvertToStringKeyMap(m map[interface{}]interface{}) map[string]interface{} { + result := make(map[string]interface{}) + for k, v := range m { + strKey := fmt.Sprintf("%v", k) + switch val := v.(type) { + case map[interface{}]interface{}: + result[strKey] = ConvertToStringKeyMap(val) + case []interface{}: + result[strKey] = convertSlice(val) + default: + result[strKey] = v + } + } + return result +} + +// convertSlice converts slice elements recursively, handling nested maps. +func convertSlice(s []interface{}) []interface{} { + result := make([]interface{}, len(s)) + for i, v := range s { + switch val := v.(type) { + case map[interface{}]interface{}: + result[i] = ConvertToStringKeyMap(val) + case []interface{}: + result[i] = convertSlice(val) + default: + result[i] = v + } + } + return result +} + +// DeepCopyMap creates a deep copy of a map. +// This handles non-JSON-serializable types and preserves type information +// (e.g., int64 stays int64, not float64 like JSON marshal/unmarshal). +// +// Returns an error if deep copy fails. 
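+//
+// For example (sketch):
+//
+//	paramsCopy, err := DeepCopyMap(params)
+//	if err != nil {
+//		// fail or fall back; the original map is left untouched
+//	}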
+func DeepCopyMap(m map[string]interface{}) (map[string]interface{}, error) { + if m == nil { + return nil, nil + } + + copied, err := copystructure.Copy(m) + if err != nil { + return nil, fmt.Errorf("failed to deep copy map: %w", err) + } + + result, ok := copied.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unexpected type after deep copy: %T", copied) + } + + return result, nil +} + +// DeepCopyMapWithFallback creates a deep copy of a map, falling back to shallow copy on error. +// Use this when you want to continue even if deep copy fails. +func DeepCopyMapWithFallback(m map[string]interface{}) map[string]interface{} { + if m == nil { + return nil + } + + result, err := DeepCopyMap(m) + if err != nil { + // Fallback to shallow copy + result = make(map[string]interface{}) + for k, v := range m { + result[k] = v + } + } + + return result +} + +// GetNestedValue extracts a value from a nested map using dot notation. +// Supports both map[string]interface{} and map[interface{}]interface{} at any level. +// +// Parameters: +// - data: The map to extract from +// - path: Dot-separated path (e.g., "metadata.labels.app") +// +// Returns the value at the path or an error if any part of the path doesn't exist. +// +// Example: +// +// data := map[string]interface{}{ +// "metadata": map[string]interface{}{ +// "labels": map[string]interface{}{ +// "app": "myapp", +// }, +// }, +// } +// val, err := GetNestedValue(data, "metadata.labels.app") // val = "myapp" +func GetNestedValue(data map[string]interface{}, path string) (interface{}, error) { + parts := splitPath(path) + var current interface{} = data + + for i, part := range parts { + switch v := current.(type) { + case map[string]interface{}: + val, ok := v[part] + if !ok { + return nil, fmt.Errorf("field '%s' not found at path '%s'", part, joinPath(parts[:i+1])) + } + current = val + case map[interface{}]interface{}: + val, ok := v[part] + if !ok { + return nil, fmt.Errorf("field '%s' not found at path '%s'", part, joinPath(parts[:i+1])) + } + current = val + default: + return nil, fmt.Errorf("cannot access field '%s': parent is not a map (got %T)", part, current) + } + } + + return current, nil +} + +// splitPath splits a dot-separated path into parts. +func splitPath(path string) []string { + if path == "" { + return nil + } + // Simple split - doesn't handle escaped dots + result := make([]string, 0) + start := 0 + for i := 0; i < len(path); i++ { + if path[i] == '.' { + if i > start { + result = append(result, path[start:i]) + } + start = i + 1 + } + } + if start < len(path) { + result = append(result, path[start:]) + } + return result +} + +// joinPath joins path parts with dots. +func joinPath(parts []string) string { + if len(parts) == 0 { + return "" + } + result := parts[0] + for i := 1; i < len(parts); i++ { + result += "." + parts[i] + } + return result +} diff --git a/pkg/utils/path.go b/pkg/utils/path.go new file mode 100644 index 0000000..d0789cc --- /dev/null +++ b/pkg/utils/path.go @@ -0,0 +1,52 @@ +// Package utils provides general-purpose utility functions. +package utils + +import ( + "fmt" + "path/filepath" + "strings" +) + +// ResolveSecurePath resolves a relative path against the base directory and validates +// that the resolved path does not escape the base directory. +// This prevents directory traversal attacks (e.g., "../../../etc/passwd"). 
+// +// Parameters: +// - baseDir: The base directory to resolve paths relative to +// - refPath: The path to resolve (can be relative or absolute) +// +// Returns the resolved absolute path or an error if the path escapes baseDir. +// +// Example: +// +// path, err := ResolveSecurePath("/app/configs", "manifests/deployment.yaml") +// // path = "/app/configs/manifests/deployment.yaml" +// +// path, err := ResolveSecurePath("/app/configs", "../../../etc/passwd") +// // err = "path \"../../../etc/passwd\" escapes base directory" +func ResolveSecurePath(baseDir, refPath string) (string, error) { + baseAbs, err := filepath.Abs(baseDir) + if err != nil { + return "", fmt.Errorf("failed to resolve base directory: %w", err) + } + baseClean := filepath.Clean(baseAbs) + + var targetPath string + if filepath.IsAbs(refPath) { + targetPath = filepath.Clean(refPath) + } else { + targetPath = filepath.Clean(filepath.Join(baseClean, refPath)) + } + + // Check if target path is within base directory + rel, err := filepath.Rel(baseClean, targetPath) + if err != nil { + return "", fmt.Errorf("path %q escapes base directory", refPath) + } + + if strings.HasPrefix(rel, "..") { + return "", fmt.Errorf("path %q escapes base directory", refPath) + } + + return targetPath, nil +} diff --git a/pkg/utils/reflect.go b/pkg/utils/reflect.go new file mode 100644 index 0000000..0e9e3ac --- /dev/null +++ b/pkg/utils/reflect.go @@ -0,0 +1,21 @@ +// Package utils provides general-purpose utility functions. +package utils + +import "reflect" + +// IsSliceOrArray checks if a value is a slice or array using reflection. +// Returns false for nil values. +// +// Example: +// +// IsSliceOrArray([]int{1, 2, 3}) // true +// IsSliceOrArray([3]int{1, 2, 3}) // true +// IsSliceOrArray("not a slice") // false +// IsSliceOrArray(nil) // false +func IsSliceOrArray(value interface{}) bool { + if value == nil { + return false + } + kind := reflect.TypeOf(value).Kind() + return kind == reflect.Slice || kind == reflect.Array +} diff --git a/pkg/utils/template.go b/pkg/utils/template.go new file mode 100644 index 0000000..1208a98 --- /dev/null +++ b/pkg/utils/template.go @@ -0,0 +1,160 @@ +// Package utils provides general-purpose utility functions. +package utils + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "text/template" + "time" + + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +// TemplateFuncs provides helper functions for Go templates. +// These functions are available within {{ }} template expressions. 
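+//
+// The map is registered by RenderTemplate below, so these helpers can be used
+// directly inside template expressions. A minimal illustrative usage (the
+// template string and data are arbitrary sample values):
+//
+//	out, err := RenderTemplate(`{{ upper .name }} ({{ default "dev" .env }})`,
+//		map[string]interface{}{"name": "cluster-a", "env": ""})
+//	// out == "CLUSTER-A (dev)"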
+var TemplateFuncs = template.FuncMap{ + // Time functions + "now": time.Now, + "date": func(layout string, t time.Time) string { + return t.Format(layout) + }, + "dateFormat": func(layout string, t time.Time) string { + return t.Format(layout) + }, + + // String functions + "lower": strings.ToLower, + "upper": strings.ToUpper, + "title": func(s string) string { + return cases.Title(language.English).String(s) + }, + "trim": strings.TrimSpace, + "trimPrefix": strings.TrimPrefix, + "trimSuffix": strings.TrimSuffix, + "replace": strings.ReplaceAll, + "contains": strings.Contains, + "hasPrefix": strings.HasPrefix, + "hasSuffix": strings.HasSuffix, + + // Quote function + "quote": func(s string) string { + return fmt.Sprintf("%q", s) + }, + + // Default value function - returns defaultVal if val is nil or empty + "default": func(defaultVal, val interface{}) interface{} { + if val == nil || val == "" { + return defaultVal + } + return val + }, + + // Type conversion functions + "int": func(v interface{}) int { + switch val := v.(type) { + case int: + return val + case int64: + return int(val) + case float64: + return int(val) + case string: + i, _ := strconv.Atoi(val) //nolint:errcheck // returns 0 on error, which is acceptable + return i + default: + return 0 + } + }, + "int64": func(v interface{}) int64 { + switch val := v.(type) { + case int: + return int64(val) + case int64: + return val + case float64: + return int64(val) + case string: + i, _ := strconv.ParseInt(val, 10, 64) //nolint:errcheck // returns 0 on error, which is acceptable + return i + default: + return 0 + } + }, + "float": func(v interface{}) float64 { + switch val := v.(type) { + case int: + return float64(val) + case int64: + return float64(val) + case float64: + return val + case string: + f, _ := strconv.ParseFloat(val, 64) //nolint:errcheck // returns 0 on error, which is acceptable + return f + default: + return 0 + } + }, + "float64": func(v interface{}) float64 { + switch val := v.(type) { + case int: + return float64(val) + case int64: + return float64(val) + case float64: + return val + case string: + f, _ := strconv.ParseFloat(val, 64) //nolint:errcheck // returns 0 on error, which is acceptable + return f + default: + return 0 + } + }, + "string": func(v interface{}) string { + return fmt.Sprintf("%v", v) + }, +} + +// RenderTemplate renders a Go template string with the given data. +// If the string contains no template delimiters ({{ }}), it is returned as-is. +// +// Parameters: +// - templateStr: The template string to render +// - data: The data to use for template rendering +// +// Returns the rendered string or an error if rendering fails. +// +// Example: +// +// rendered, err := RenderTemplate("Hello {{.name}}", map[string]interface{}{"name": "World"}) +// // rendered = "Hello World" +func RenderTemplate(templateStr string, data map[string]interface{}) (string, error) { + // If no template delimiters, return as-is + if !strings.Contains(templateStr, "{{") { + return templateStr, nil + } + + tmpl, err := template.New("template").Funcs(TemplateFuncs).Option("missingkey=error").Parse(templateStr) + if err != nil { + return "", fmt.Errorf("failed to parse template: %w", err) + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return "", fmt.Errorf("failed to execute template: %w", err) + } + + return buf.String(), nil +} + +// RenderTemplateBytes renders a Go template string and returns the result as bytes. 
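+// It is a thin wrapper around RenderTemplate for callers that need []byte.
+//
+// Example (illustrative; the template and data are arbitrary sample values):
+//
+//	raw, err := RenderTemplateBytes("name: {{ .cluster }}", map[string]interface{}{"cluster": "prod-1"})
+//	// raw == []byte("name: prod-1")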
+func RenderTemplateBytes(templateStr string, data map[string]interface{}) ([]byte, error) { + result, err := RenderTemplate(templateStr, data) + if err != nil { + return nil, err + } + return []byte(result), nil +} diff --git a/test/integration/executor/executor_k8s_integration_test.go b/test/integration/executor/executor_k8s_integration_test.go index 0bc52f8..53fe156 100644 --- a/test/integration/executor/executor_k8s_integration_test.go +++ b/test/integration/executor/executor_k8s_integration_test.go @@ -14,8 +14,8 @@ import ( "github.com/cloudevents/sdk-go/v2/event" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/executor" - "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/generation" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/manifest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -370,7 +370,7 @@ func TestExecutor_K8s_CreateResources(t *testing.T) { cmResult := result.ResourceResults[0] assert.Equal(t, "clusterConfigMap", cmResult.Name) assert.Equal(t, executor.StatusSuccess, cmResult.Status, "ConfigMap creation should succeed") - assert.Equal(t, generation.OperationCreate, cmResult.Operation, "Should be create operation") + assert.Equal(t, manifest.OperationCreate, cmResult.Operation, "Should be create operation") assert.Equal(t, "ConfigMap", cmResult.Kind) t.Logf("ConfigMap created: %s/%s (operation: %s)", cmResult.Namespace, cmResult.ResourceName, cmResult.Operation) @@ -378,7 +378,7 @@ func TestExecutor_K8s_CreateResources(t *testing.T) { secretResult := result.ResourceResults[1] assert.Equal(t, "clusterSecret", secretResult.Name) assert.Equal(t, executor.StatusSuccess, secretResult.Status, "Secret creation should succeed") - assert.Equal(t, generation.OperationCreate, secretResult.Operation) + assert.Equal(t, manifest.OperationCreate, secretResult.Operation) assert.Equal(t, "Secret", secretResult.Kind) t.Logf("Secret created: %s/%s (operation: %s)", secretResult.Namespace, secretResult.ResourceName, secretResult.Operation) @@ -503,7 +503,7 @@ func TestExecutor_K8s_UpdateExistingResource(t *testing.T) { // Verify it was an update operation require.Len(t, result.ResourceResults, 1) cmResult := result.ResourceResults[0] - assert.Equal(t, generation.OperationUpdate, cmResult.Operation, "Should be update operation") + assert.Equal(t, manifest.OperationUpdate, cmResult.Operation, "Should be update operation") t.Logf("Resource operation: %s", cmResult.Operation) // Verify ConfigMap was updated with new data @@ -607,14 +607,14 @@ func TestExecutor_K8s_DiscoveryByLabels(t *testing.T) { evt := createK8sTestEvent(clusterId) result1 := exec.Execute(ctx, evt) require.Equal(t, executor.StatusSuccess, result1.Status) - assert.Equal(t, generation.OperationCreate, result1.ResourceResults[0].Operation) + assert.Equal(t, manifest.OperationCreate, result1.ResourceResults[0].Operation) t.Logf("First execution: %s", result1.ResourceResults[0].Operation) // Second execution - should find by labels and update evt2 := createK8sTestEvent(clusterId) result2 := exec.Execute(ctx, evt2) require.Equal(t, executor.StatusSuccess, result2.Status) - assert.Equal(t, generation.OperationUpdate, result2.ResourceResults[0].Operation) + assert.Equal(t, manifest.OperationUpdate, result2.ResourceResults[0].Operation) t.Logf("Second execution: 
%s (discovered by labels)", result2.ResourceResults[0].Operation) } @@ -678,7 +678,7 @@ func TestExecutor_K8s_RecreateOnChange(t *testing.T) { evt := createK8sTestEvent(clusterId) result1 := exec.Execute(ctx, evt) require.Equal(t, executor.StatusSuccess, result1.Status) - assert.Equal(t, generation.OperationCreate, result1.ResourceResults[0].Operation) + assert.Equal(t, manifest.OperationCreate, result1.ResourceResults[0].Operation) // Get the original UID cmGVK := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"} @@ -692,7 +692,7 @@ func TestExecutor_K8s_RecreateOnChange(t *testing.T) { evt2 := createK8sTestEvent(clusterId) result2 := exec.Execute(ctx, evt2) require.Equal(t, executor.StatusSuccess, result2.Status) - assert.Equal(t, generation.OperationRecreate, result2.ResourceResults[0].Operation) + assert.Equal(t, manifest.OperationRecreate, result2.ResourceResults[0].Operation) t.Logf("Second execution: %s", result2.ResourceResults[0].Operation) // Verify it's a new resource (different UID) @@ -741,7 +741,7 @@ func TestExecutor_K8s_MultipleResourceTypes(t *testing.T) { // Verify both resources created for _, rr := range result.ResourceResults { assert.Equal(t, executor.StatusSuccess, rr.Status, "Resource %s should succeed", rr.Name) - assert.Equal(t, generation.OperationCreate, rr.Operation) + assert.Equal(t, manifest.OperationCreate, rr.Operation) t.Logf("Created %s: %s/%s", rr.Kind, rr.Namespace, rr.ResourceName) } @@ -942,7 +942,7 @@ func TestExecutor_K8s_MultipleMatchingResources(t *testing.T) { // Should create a new resource (no discovery configured) rr := result.ResourceResults[0] - assert.Equal(t, generation.OperationCreate, rr.Operation, + assert.Equal(t, manifest.OperationCreate, rr.Operation, "Should create new resource (no discovery configured)") t.Logf("Operation: %s on resource: %s/%s", rr.Operation, rr.Namespace, rr.ResourceName)