diff --git a/README.md b/README.md
index 579f7d3..9e120fe 100644
--- a/README.md
+++ b/README.md
@@ -157,6 +157,7 @@ A HyperFleet Adapter requires several files for configuration:
To see all configuration options, read the [configuration.md](configuration.md) file.

#### Adapter configuration
+
The adapter deployment configuration (`AdapterConfig`) controls runtime and infrastructure settings for the adapter process, such as client connections, retries, and broker subscription details.
It is loaded with Viper, so values can be overridden by CLI flags
@@ -167,10 +168,11 @@ and environment variables in this priority order: CLI flags > env vars > file >
(HyperFleet API, Maestro, broker, Kubernetes)

Reference examples:
-- `configs/adapter-deployment-config.yaml` (full reference with env/flag notes)
-- `charts/examples/adapter-config.yaml` (minimal deployment example)
+
+- `charts/examples/adapter-config.yaml`

#### Adapter task configuration
+
The adapter task configuration (`AdapterTaskConfig`) defines the **business logic** for processing events: parameters, preconditions, resources to create, and post-actions.
This file is loaded as **static YAML** (no Viper overrides) and is required at runtime.
@@ -180,13 +182,13 @@ This file is loaded as **static YAML** (no Viper overrides) and is required at r
- **Resource manifests**: inline YAML or external file via `manifest.ref`

Reference examples:
-- `charts/examples/adapter-task-config.yaml` (worked example)
-- `configs/adapter-task-config-template.yaml` (complete schema reference)
+- `charts/examples/adapter-task-config.yaml` (worked example)

### Broker Configuration

Broker configuration is unusual in that responsibility is split between:
+
- **Hyperfleet broker library**: configures the connection to a concrete broker (Google Pub/Sub, RabbitMQ, ...)
- Configured using a YAML file specified by the `BROKER_CONFIG_FILE` environment variable - **Adapter**: configures which topic/subscriptions to use on the broker diff --git a/charts/examples/adapter-config.yaml b/charts/examples/adapter-config.yaml index d4502f1..439d5c5 100644 --- a/charts/examples/adapter-config.yaml +++ b/charts/examples/adapter-config.yaml @@ -11,7 +11,9 @@ spec: version: "0.1.0" # Log the full merged configuration after load (default: false) - debugConfig: false + debugConfig: true + log: + level: debug clients: hyperfleetApi: @@ -22,8 +24,51 @@ spec: retryBackoff: exponential broker: - subscriptionId: "example-clusters-subscription" - topic: "example-clusters" + subscriptionId: "CHANGE_ME" + topic: "CHANGE_ME" kubernetes: apiVersion: "v1" + #kubeConfigPath: PATH_TO_KUBECONFIG # for local development + + maestro: + grpcServerAddress: "maestro-grpc.maestro.svc.cluster.local:8090" + + # HTTPS server address for REST API operations (optional) + # Environment variable: HYPERFLEET_MAESTRO_HTTP_SERVER_ADDRESS + httpServerAddress: "http://maestro.maestro.svc.cluster.local:8000" + + # Source identifier for CloudEvents routing (must be unique across adapters) + # Environment variable: HYPERFLEET_MAESTRO_SOURCE_ID + sourceId: "hyperfleet-adapter" + + # Client identifier (defaults to sourceId if not specified) + # Environment variable: HYPERFLEET_MAESTRO_CLIENT_ID + clientId: "hyperfleet-adapter-client" + insecure: true + + # Authentication configuration + #auth: + # type: "tls" # TLS certificate-based mTLS + # + # tlsConfig: + # # gRPC TLS configuration + # # Certificate paths (mounted from Kubernetes secrets) + # # Environment variable: HYPERFLEET_MAESTRO_CA_FILE + # caFile: "/etc/maestro/certs/grpc/ca.crt" + # + # # Environment variable: HYPERFLEET_MAESTRO_CERT_FILE + # certFile: "/etc/maestro/certs/grpc/client.crt" + # + # # Environment variable: HYPERFLEET_MAESTRO_KEY_FILE + # keyFile: "/etc/maestro/certs/grpc/client.key" + # + # # Server name for TLS verification + # # Environment variable: HYPERFLEET_MAESTRO_SERVER_NAME + # serverName: "maestro-grpc.maestro.svc.cluster.local" + # + # # HTTP API TLS configuration (may use different CA than gRPC) + # # If not set, falls back to caFile for backwards compatibility + # # Environment variable: HYPERFLEET_MAESTRO_HTTP_CA_FILE + # httpCaFile: "/etc/maestro/certs/https/ca.crt" + diff --git a/charts/examples/adapter-task-config.yaml b/charts/examples/adapter-task-config.yaml index e517ef7..14fc246 100644 --- a/charts/examples/adapter-task-config.yaml +++ b/charts/examples/adapter-task-config.yaml @@ -76,7 +76,31 @@ spec: # Resources with valid K8s manifests resources: + - name: "maestro" + transport: + client: "maestro" + maestro: + targetCluster: cluster1 + manifest: + apiVersion: v1 + kind: Namespace + metadata: + name: "maestro-{{ .clusterId }}" + labels: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/cluster-name: "{{ .clusterName }}" + annotations: + hyperfleet.io/generation: "{{ .generationSpec }}" + discovery: + namespace: "*" # Cluster-scoped resource (Namespace) + bySelectors: + labelSelector: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/cluster-name: "{{ .clusterName }}" + - name: "clusterNamespace" + transport: + client: "kubernetes" manifest: apiVersion: v1 kind: Namespace @@ -98,6 +122,8 @@ spec: # in the namespace created above # it will require a service account to be created in that namespace as well as a role and rolebinding - name: "jobServiceAccount" + transport: + client: 
"kubernetes" manifest: ref: "/etc/adapter/job-serviceaccount.yaml" discovery: @@ -107,6 +133,8 @@ spec: hyperfleet.io/cluster-id: "{{ .clusterId }}" - name: "job_role" + transport: + client: "kubernetes" manifest: ref: "/etc/adapter/job-role.yaml" discovery: @@ -118,6 +146,8 @@ spec: hyperfleet.io/resource-type: "role" - name: "job_rolebinding" + transport: + client: "kubernetes" manifest: ref: "/etc/adapter/job-rolebinding.yaml" discovery: @@ -129,6 +159,8 @@ spec: hyperfleet.io/resource-type: "rolebinding" - name: "jobNamespace" + transport: + client: "kubernetes" manifest: ref: "/etc/adapter/job.yaml" discovery: @@ -143,6 +175,8 @@ spec: # and using the same service account as the adapter - name: "deploymentNamespace" + transport: + client: "kubernetes" manifest: ref: "/etc/adapter/deployment.yaml" discovery: diff --git a/cmd/adapter/main.go b/cmd/adapter/main.go index 292a8c5..676cee1 100644 --- a/cmd/adapter/main.go +++ b/cmd/adapter/main.go @@ -13,6 +13,7 @@ import ( "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/executor" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/maestro_client" "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/health" "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/otel" @@ -285,14 +286,32 @@ func runServe() error { return fmt.Errorf("failed to create Kubernetes client: %w", err) } + // Create Maestro client if configured + var maestroClient maestro_client.ManifestWorkClient + if config.Spec.Clients.Maestro != nil { + log.Info(ctx, "Creating Maestro client...") + maestroClient, err = createMaestroClient(ctx, config.Spec.Clients.Maestro, log) + if err != nil { + errCtx := logger.WithErrorField(ctx, err) + log.Errorf(errCtx, "Failed to create Maestro client") + return fmt.Errorf("failed to create Maestro client: %w", err) + } + log.Info(ctx, "Maestro client created successfully") + } + // Create the executor using the builder pattern log.Info(ctx, "Creating event executor...") - exec, err := executor.NewBuilder(). + execBuilder := executor.NewBuilder(). WithConfig(config). WithAPIClient(apiClient). WithK8sClient(k8sClient). - WithLogger(log). 
- Build() + WithLogger(log) + + if maestroClient != nil { + execBuilder = execBuilder.WithMaestroClient(maestroClient) + } + + exec, err := execBuilder.Build() if err != nil { errCtx := logger.WithErrorField(ctx, err) log.Errorf(errCtx, "Failed to create executor") @@ -494,3 +513,31 @@ func createK8sClient(ctx context.Context, k8sConfig config_loader.KubernetesConf } return k8s_client.NewClient(ctx, clientConfig, log) } + +// createMaestroClient creates a Maestro client from the config +func createMaestroClient(ctx context.Context, maestroConfig *config_loader.MaestroClientConfig, log logger.Logger) (*maestro_client.Client, error) { + clientConfig := &maestro_client.Config{ + MaestroServerAddr: maestroConfig.HTTPServerAddress, + GRPCServerAddr: maestroConfig.GRPCServerAddress, + SourceID: maestroConfig.SourceID, + Insecure: maestroConfig.Insecure, + } + + // Parse timeout if specified + if maestroConfig.Timeout != "" { + timeout, err := time.ParseDuration(maestroConfig.Timeout) + if err != nil { + return nil, fmt.Errorf("invalid maestro timeout %q: %w", maestroConfig.Timeout, err) + } + clientConfig.HTTPTimeout = timeout + } + + // Configure TLS if auth type is "tls" + if maestroConfig.Auth.Type == "tls" && maestroConfig.Auth.TLSConfig != nil { + clientConfig.CAFile = maestroConfig.Auth.TLSConfig.CAFile + clientConfig.ClientCertFile = maestroConfig.Auth.TLSConfig.CertFile + clientConfig.ClientKeyFile = maestroConfig.Auth.TLSConfig.KeyFile + } + + return maestro_client.NewMaestroClient(ctx, clientConfig, log) +} diff --git a/configs/README.md b/configs/README.md deleted file mode 100644 index 0afaf3d..0000000 --- a/configs/README.md +++ /dev/null @@ -1,216 +0,0 @@ -# Broker Configuration - -This directory contains ConfigMap templates and examples for configuring the hyperfleet-adapter broker consumer. - -## Files - -- **`broker-configmap-pubsub-template.yaml`** - Comprehensive template with all options and documentation -- **`broker-configmap-pubsub-example.yaml`** - Simple ready-to-use example for quick start - -## Quick Start - -### 1. Choose Your Broker - -Currently supported: -- **Google Pub/Sub** - For GCP environments -- RabbitMQ - (template can be added if needed) - -### 2. Configure for Google Pub/Sub - -Edit `broker-configmap-pubsub-example.yaml`: - -```yaml -data: - # Broker configuration - BROKER_GOOGLEPUBSUB_PROJECT_ID: "your-gcp-project" -``` - -Also set the adapter broker settings in the deployment config: - -```yaml -spec: - clients: - broker: - subscriptionId: "your-subscription-name" - topic: "your-topic-name" -``` - -### 3. Apply the ConfigMap - -```bash -kubectl apply -f configs/broker-configmap-pubsub-example.yaml -``` - -### 4. 
Reference in Deployment - -The adapter deployment should reference this ConfigMap using `envFrom`: - -```yaml -spec: - containers: - - name: adapter - envFrom: - - configMapRef: - name: hyperfleet-broker-pubsub -``` - -## Configuration Options - -### Environment Variables - -The hyperfleet-broker library reads configuration from environment variables: - -#### Required Variables - -| Variable | Description | Example | -|----------|-------------|---------| -| `BROKER_TYPE` | Broker type | `googlepubsub` | -| `BROKER_GOOGLEPUBSUB_PROJECT_ID` | GCP project ID | `my-project` | - -#### Optional Variables - -| Variable | Description | Default | -|----------|-------------|---------| -| `BROKER_GOOGLEPUBSUB_TOPIC` | Topic name for publishing | - | -| `BROKER_GOOGLEPUBSUB_MAX_OUTSTANDING_MESSAGES` | Max unacked messages | `1000` | -| `BROKER_GOOGLEPUBSUB_NUM_GOROUTINES` | Pub/Sub client goroutines | `10` | -| `SUBSCRIBER_PARALLELISM` | Concurrent handlers | `1` | -| `LOG_CONFIG` | Log configuration at startup | `false` | - -### Configuration File (Alternative) - -Instead of environment variables, you can use a `broker.yaml` file: - -```yaml -broker: - type: googlepubsub - googlepubsub: - project_id: "my-project" - subscription: "my-subscription" - max_outstanding_messages: 1000 - num_goroutines: 10 - -subscriber: - parallelism: 5 -``` - -Mount this as a file and set `BROKER_CONFIG_FILE=/etc/broker/broker.yaml`. - -## GCP Authentication - -### Option 1: Workload Identity (Recommended for GKE) - -```yaml -serviceAccountName: hyperfleet-adapter -# GKE will automatically inject credentials -``` - -### Option 2: Service Account Key - -```yaml -env: -- name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/secrets/google/key.json -volumeMounts: -- name: gcp-credentials - mountPath: /var/secrets/google - readOnly: true -volumes: -- name: gcp-credentials - secret: - secretName: gcp-service-account-key -``` - -### Option 3: Emulator (Development Only) - -```yaml -env: -- name: PUBSUB_EMULATOR_HOST - value: "localhost:8085" -``` - -## Performance Tuning - -### High Throughput - -For processing many messages: - -```yaml -BROKER_GOOGLEPUBSUB_MAX_OUTSTANDING_MESSAGES: "5000" -BROKER_GOOGLEPUBSUB_NUM_GOROUTINES: "20" -SUBSCRIBER_PARALLELISM: "10" -``` - -### Low Latency - -For quick response times: - -```yaml -BROKER_GOOGLEPUBSUB_MAX_OUTSTANDING_MESSAGES: "100" -BROKER_GOOGLEPUBSUB_NUM_GOROUTINES: "5" -SUBSCRIBER_PARALLELISM: "3" -``` - -### Memory Constrained - -For limited memory environments: - -```yaml -BROKER_GOOGLEPUBSUB_MAX_OUTSTANDING_MESSAGES: "100" -BROKER_GOOGLEPUBSUB_NUM_GOROUTINES: "2" -SUBSCRIBER_PARALLELISM: "1" -``` - -## Troubleshooting - -### Enable Debug Logging - -```yaml -LOG_CONFIG: "true" -``` - -This will log the complete broker configuration at startup. 
- -### Check Credentials - -```bash -# Verify service account has required permissions -kubectl exec -it -- sh -# Inside pod: -gcloud auth list -gcloud pubsub subscriptions list --project= -``` - -### Test Pub/Sub Connection - -```bash -# From adapter pod -gcloud pubsub subscriptions pull \ - --project= \ - --limit=1 \ - --auto-ack -``` - -## Required GCP Permissions - -The service account needs these IAM roles: - -``` -roles/pubsub.subscriber # To consume messages -roles/pubsub.viewer # To view subscriptions -``` - -Or these specific permissions: - -``` -pubsub.subscriptions.consume -pubsub.subscriptions.get -``` - -## See Also - -- [hyperfleet-broker Library](https://github.com/openshift-hyperfleet/hyperfleet-broker) -- [Internal broker_consumer Package](../internal/broker_consumer/README.md) -- [Integration Tests](../test/integration/broker_consumer/README.md) -- [CloudEvents Specification](https://github.com/cloudevents/spec) - diff --git a/configs/adapter-config-template.yaml b/configs/adapter-config-template.yaml deleted file mode 100644 index 4f2ce1b..0000000 --- a/configs/adapter-config-template.yaml +++ /dev/null @@ -1,293 +0,0 @@ -# HyperFleet Adapter Task Configuration Template (MVP) -# -# This is a Configuration Template for configuring cloud provider adapters -# using the HyperFleet Adapter Framework with CEL (Common Expression Language). -# -# TEMPLATE SYNTAX: -# ================ -# 1. Go Templates ({{ .var }}) - Variable interpolation throughout -# 2. field: "path" - Simple JSON path extraction (translated to CEL internally) -# 3. expression: "cel" - Full CEL expressions for complex logic -# -# CONDITION SYNTAX (when:): -# ========================= -# Option 1: Expression syntax (CEL) -# when: -# expression: | -# clusterPhase == "Terminating" -# -# Option 2: Structured conditions (field + operator + value) -# when: -# conditions: -# - field: "clusterPhase" -# operator: "equals" -# value: "Terminating" -# -# Supported operators: equals, notEquals, in, notIn, contains, greaterThan, lessThan, exists -# -# CEL OPTIONAL CHAINING: -# ====================== -# Use optional chaining with orValue() to safely access potentially missing fields: -# resources.?clusterNamespace.?status.?phase.orValue("") -# adapter.?executionStatus.orValue("") -# -# Copy this file to your adapter repository and customize for your needs. 
- -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - # Adapter name (used as resource name and in logs/metrics) - name: example-adapter - labels: - hyperfleet.io/adapter-type: example - hyperfleet.io/component: adapter - -# ============================================================================ -# Task Specification -# ============================================================================ -spec: - # ============================================================================ - # Global params - # ============================================================================ - # params to extract from CloudEvent and environment variables - # - # SUPPORTED TYPES: - # ================ - # - string: Default, any value converted to string - # - int/int64: Integer value (strings parsed, floats truncated) - # - float/float64: Floating point value - # - bool: Boolean (supports: true/false, yes/no, on/off, 1/0) - # - params: - # Environment variables from deployment - - name: "hyperfleetApiBaseUrl" - source: "env.HYPERFLEET_API_BASE_URL" - type: "string" - description: "Base URL for the HyperFleet API" - required: true - - - name: "hyperfleetApiVersion" - source: "env.HYPERFLEET_API_VERSION" - type: "string" - default: "v1" - description: "API version to use" - - # Extract from CloudEvent data - - name: "clusterId" - source: "event.id" - type: "string" - description: "Unique identifier for the target cluster" - required: true - - # Example: Extract and convert to int - # - name: "nodeCount" - # source: "event.spec.nodeCount" - # type: "int" - # default: 3 - # description: "Number of nodes in the cluster" - - # Example: Extract and convert to bool - # - name: "enableFeature" - # source: "env.ENABLE_FEATURE" - # type: "bool" - # default: false - # description: "Enable experimental feature" - - - # ============================================================================ - # Global Preconditions - # ============================================================================ - # These preconditions run sequentially and validate cluster state before resource operations. - # - # DATA SCOPES: - # ============ - # Capture scope (field/expression): API response data only - # - Access: status.phase, items[0].name, etc. - # - # Conditions scope (conditions/expression): Full execution context - # - params.* : Original extracted params - # - .*: Full API response (e.g., clusterStatus.status.phase) - # - capturedField : Explicitly captured values - # - adapter.* : Adapter metadata - # - resources.* : Created resources (empty during preconditions) - # - preconditions: - # ========================================================================== - # Step 1: Get cluster status - # ========================================================================== - - name: "clusterStatus" - apiCall: - method: "GET" - # NOTE: API path includes /api/hyperfleet/ prefix - url: "{{ .hyperfleetApiBaseUrl }}/api/hyperfleet/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}" - timeout: 10s - retryAttempts: 3 - retryBackoff: "exponential" - # Capture fields from the API response. Captured values become variables for use in resources section. - # SCOPE: API response data only - # Supports two modes: - # - field: Simple dot notation or JSONPath expression for extracting values - # - expression: CEL expression for computed values - # Only one of 'field' or 'expression' can be set per capture. 
- capture: - # Simple dot notation - - name: "clusterName" - field: "name" - - name: "clusterPhase" - field: "status.phase" - - name: "generationId" - field: "generation" - - # JSONPath for complex extraction (filter by field value) - # See: https://kubernetes.io/docs/reference/kubectl/jsonpath/ - # - name: "lzNamespaceStatus" - # field: "{.items[?(@.adapter=='landing-zone-adapter')].data.namespace.status}" - - # CEL expression for computed values - # - name: "activeItemCount" - # expression: "items.filter(i, i.status == 'active').size()" - - # Conditions to check. SCOPE: Full execution context - # You can access: - # - Captured values: clusterPhase, clusterName, etc. - # - Full API response: clusterStatus.status.phase, clusterStatus.spec.nodeCount - # - Params: clusterId, hyperfleetApiBaseUrl, etc. - conditions: - # Using captured value - - field: "clusterPhase" - operator: "equals" - value: "NotReady" - - # Or dig directly into API response using precondition name - # - field: "clusterStatus.status.nodeCount" - # operator: "greaterThan" - # value: 0 - - # Alternative: CEL expression with full access - # expression: | - # clusterStatus.status.phase == "Ready" && - # clusterStatus.spec.nodeCount > 0 - - # ============================================================================ - # Resources (Create/Update Resources) - # ============================================================================ - # All resources are created/updated sequentially in the order defined below - resources: - # ========================================================================== - # Resource 1: Cluster Namespace - # ========================================================================== - - name: "clusterNamespace" - manifest: - apiVersion: v1 - kind: Namespace - metadata: - # Use | lower to ensure valid K8s resource name (lowercase RFC 1123) - name: "{{ .clusterId | lower }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "{{ .metadata.name }}" - hyperfleet.io/resource-type: "namespace" - annotations: - hyperfleet.io/created-by: "hyperfleet-adapter" - hyperfleet.io/generation: "{{ .generationId }}" - discovery: - # The "namespace" field within discovery is optional: - # - For namespaced resources: set namespace to target the specific namespace - # - For cluster-scoped resources (like Namespace, ClusterRole): omit or leave empty - # Here we omit it since Namespace is cluster-scoped - bySelectors: - labelSelector: - hyperfleet.io/resource-type: "namespace" - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "{{ .metadata.name }}" - - - # ============================================================================ - # Post-Processing - # ============================================================================ - post: - payloads: - # Build status payload inline - - name: "clusterStatusPayload" - build: - # Adapter name for tracking which adapter reported this status - adapter: "{{ .metadata.name }}" - - # Conditions array - each condition has type, status, reason, message - # Use CEL optional chaining ?.orValue() for safe field access - conditions: - # Applied: Resources successfully created - - type: "Applied" - status: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "True" : "False" - reason: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" - ? 
"NamespaceCreated" - : "NamespacePending" - message: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" - ? "Namespace created successfully" - : "Namespace creation in progress" - - # Available: Resources are active and ready - - type: "Available" - status: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "True" : "False" - reason: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "NamespaceReady" : "NamespaceNotReady" - message: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "Namespace is active and ready" : "Namespace is not active and ready" - - # Health: Adapter execution status (runtime) Don't need to update this. This can be reused from the adapter config. - - type: "Health" - status: - expression: | - adapter.?executionStatus.orValue("") == "success" ? "True" : (adapter.?executionStatus.orValue("") == "failed" ? "False" : "Unknown") - reason: - expression: | - adapter.?errorReason.orValue("") != "" ? adapter.?errorReason.orValue("") : "Healthy" - message: - expression: | - adapter.?errorMessage.orValue("") != "" ? adapter.?errorMessage.orValue("") : "All adapter operations completed successfully" - - # Use CEL expression for numeric fields to preserve type (not Go template which outputs strings) - observed_generation: - expression: "generationId" - - # Use Go template with now and date functions for timestamps - observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" - - # Optional data field for adapter-specific metrics extracted from resources - data: - namespace: - name: - expression: | - resources.?clusterNamespace.?metadata.?name.orValue("") - status: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") - - # ============================================================================ - # Post Actions - # ============================================================================ - # Post actions are executed after resources are created/updated - postActions: - # Report cluster status to HyperFleet API (always executed) - - name: "reportClusterStatus" - apiCall: - method: "POST" - # NOTE: API path includes /api/hyperfleet/ prefix and ends with /statuses - url: "{{ .hyperfleetApiBaseUrl }}/api/hyperfleet/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/statuses" - body: "{{ .clusterStatusPayload }}" - timeout: 30s - retryAttempts: 3 - retryBackoff: "exponential" - headers: - - name: "Content-Type" - value: "application/json" diff --git a/configs/adapter-deployment-config.yaml b/configs/adapter-deployment-config.yaml deleted file mode 100644 index f14c5ac..0000000 --- a/configs/adapter-deployment-config.yaml +++ /dev/null @@ -1,120 +0,0 @@ -# HyperFleet Adapter Deployment Configuration -# -# This file contains ONLY infrastructure and deployment-related settings: -# - Client connections (Maestro, HyperFleet API, Kubernetes) -# - Authentication and TLS configuration -# - Connection timeouts and retry policies -# -# NOTE: This is a SAMPLE configuration file for reference and local development. -# It is NOT automatically packaged with the container image (see Dockerfile). -# -# In production, provide configuration via one of these methods: -# 1. ADAPTER_CONFIG_PATH environment variable pointing to a config file (highest priority) -# 2. 
ConfigMap mounted at /etc/adapter/config/adapter-deployment-config.yaml -# -# Example Kubernetes deployment: -# env: -# - name: ADAPTER_CONFIG_PATH -# value: /etc/adapter/config/adapter-deployment-config.yaml -# volumeMounts: -# - name: config -# mountPath: /etc/adapter/config -# -# For business logic configuration (params, preconditions, resources, post-actions), -# use a separate business config file. See configs/adapter-config-template.yaml - -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterConfig -metadata: - name: hyperfleet-adapter - labels: - hyperfleet.io/component: adapter - -spec: - adapter: - version: "0.1.0" - - # Log the full merged configuration after load (default: false) - # Environment variable: HYPERFLEET_DEBUG_CONFIG - # Flag: --debug-config - debugConfig: false - - # Client configurations for external services - clients: - # Maestro transport client configuration - maestro: - # gRPC server address - # Environment variable: HYPERFLEET_MAESTRO_GRPC_SERVER_ADDRESS - # Flag: --maestro-grpc-server-address - grpcServerAddress: "maestro-grpc.maestro.svc.cluster.local:8090" - - # HTTPS server address for REST API operations (optional) - # Environment variable: HYPERFLEET_MAESTRO_HTTP_SERVER_ADDRESS - httpServerAddress: "https://maestro-api.maestro.svc.cluster.local" - - # Source identifier for CloudEvents routing (must be unique across adapters) - # Environment variable: HYPERFLEET_MAESTRO_SOURCE_ID - sourceId: "hyperfleet-adapter" - - # Client identifier (defaults to sourceId if not specified) - # Environment variable: HYPERFLEET_MAESTRO_CLIENT_ID - clientId: "hyperfleet-adapter-client" - - # Authentication configuration - auth: - type: "tls" # TLS certificate-based mTLS - - tlsConfig: - # gRPC TLS configuration - # Certificate paths (mounted from Kubernetes secrets) - # Environment variable: HYPERFLEET_MAESTRO_CA_FILE - caFile: "/etc/maestro/certs/grpc/ca.crt" - - # Environment variable: HYPERFLEET_MAESTRO_CERT_FILE - certFile: "/etc/maestro/certs/grpc/client.crt" - - # Environment variable: HYPERFLEET_MAESTRO_KEY_FILE - keyFile: "/etc/maestro/certs/grpc/client.key" - - # Server name for TLS verification - # Environment variable: HYPERFLEET_MAESTRO_SERVER_NAME - serverName: "maestro-grpc.maestro.svc.cluster.local" - - # HTTP API TLS configuration (may use different CA than gRPC) - # If not set, falls back to caFile for backwards compatibility - # Environment variable: HYPERFLEET_MAESTRO_HTTP_CA_FILE - httpCaFile: "/etc/maestro/certs/https/ca.crt" - - # Connection settings - timeout: "30s" - retryAttempts: 3 - retryBackoff: "exponential" - - # Keep-alive for long-lived gRPC connections - keepalive: - time: "30s" - timeout: "10s" - permitWithoutStream: true - - # HyperFleet HTTP API client - hyperfleetApi: - baseUrl: http://hyperfleet-api:8000 - version: v1 - timeout: 2s - retryAttempts: 3 - retryBackoff: exponential - - # Broker consumer configuration (adapter-level) - broker: - subscriptionId: "amarin-ns1-clusters-validation-gcp-adapter" - topic: "amarin-ns1-clusters" - - # Kubernetes client (for direct K8s resources) - kubernetes: - apiVersion: "v1" - # Uses in-cluster service account by default - # Set kubeConfigPath for out-of-cluster access - kubeConfigPath: PATH_TO_KUBECONFIG_FILE - # Optional rate limits (0 uses defaults) - qps: 100 - burst: 200 diff --git a/configs/adapter-task-config-template.yaml b/configs/adapter-task-config-template.yaml deleted file mode 100644 index eb0e6e3..0000000 --- a/configs/adapter-task-config-template.yaml +++ /dev/null @@ -1,296 
+0,0 @@ -# HyperFleet Adapter Task Configuration Template (MVP) -# -# This is a Configuration Template for configuring cloud provider adapters -# using the HyperFleet Adapter Framework with CEL (Common Expression Language). -# -# TEMPLATE SYNTAX: -# ================ -# 1. Go Templates ({{ .var }}) - Variable interpolation throughout -# 2. field: "path" - Simple JSON path extraction (translated to CEL internally) -# 3. expression: "cel" - Full CEL expressions for complex logic -# -# CONDITION SYNTAX (when:): -# ========================= -# Option 1: Expression syntax (CEL) -# when: -# expression: | -# readyConditionStatus == "False" -# -# Option 2: Structured conditions (field + operator + value) -# when: -# conditions: -# - field: "readyConditionStatus" -# operator: "equals" -# value: "Terminating" -# -# Supported operators: equals, notEquals, in, notIn, contains, greaterThan, lessThan, exists -# -# CEL OPTIONAL CHAINING: -# ====================== -# Use optional chaining with orValue() to safely access potentially missing fields: -# resources.?clusterNamespace.?status.?phase.orValue("") -# adapter.?executionStatus.orValue("") -# -# Copy this file to your adapter repository and customize for your needs. - -apiVersion: hyperfleet.redhat.com/v1alpha1 -kind: AdapterTaskConfig -metadata: - # Adapter name (used as resource name and in logs/metrics) - name: example-adapter - labels: - hyperfleet.io/adapter-type: example - hyperfleet.io/component: adapter - -# ============================================================================ -# Task Specification -# ============================================================================ -spec: - # ============================================================================ - # Global params - # ============================================================================ - # params to extract from CloudEvent and environment variables - # - # SUPPORTED TYPES: - # ================ - # - string: Default, any value converted to string - # - int/int64: Integer value (strings parsed, floats truncated) - # - float/float64: Floating point value - # - bool: Boolean (supports: true/false, yes/no, on/off, 1/0) - # - params: - # Environment variables from deployment - - name: "hyperfleetApiBaseUrl" - source: "env.HYPERFLEET_API_BASE_URL" - type: "string" - description: "Base URL for the HyperFleet API" - required: true - - - name: "hyperfleetApiVersion" - source: "env.HYPERFLEET_API_VERSION" - type: "string" - default: "v1" - description: "API version to use" - - # Extract from CloudEvent data - - name: "clusterId" - source: "event.id" - type: "string" - description: "Unique identifier for the target cluster" - required: true - - # Example: Extract and convert to int - # - name: "nodeCount" - # source: "event.spec.nodeCount" - # type: "int" - # default: 3 - # description: "Number of nodes in the cluster" - - # Example: Extract and convert to bool - # - name: "enableFeature" - # source: "env.ENABLE_FEATURE" - # type: "bool" - # default: false - # description: "Enable experimental feature" - - - # ============================================================================ - # Global Preconditions - # ============================================================================ - # These preconditions run sequentially and validate cluster state before resource operations. - # - # DATA SCOPES: - # ============ - # Capture scope (field/expression): API response data only - # - Access: status.conditions, items[0].name, etc. 
- # - # Conditions scope (conditions/expression): Full execution context - # - params.* : Original extracted params - # - .*: Full API response (e.g., clusterStatus.status.conditions) - # - capturedField : Explicitly captured values - # - adapter.* : Adapter metadata - # - resources.* : Created resources (empty during preconditions) - # - preconditions: - # ========================================================================== - # Step 1: Get cluster status - # ========================================================================== - - name: "clusterStatus" - apiCall: - method: "GET" - # NOTE: API path includes /api/hyperfleet/ prefix - url: "{{ .hyperfleetApiBaseUrl }}/api/hyperfleet/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}" - timeout: 10s - retryAttempts: 3 - retryBackoff: "exponential" - # Capture fields from the API response. Captured values become variables for use in resources section. - # SCOPE: API response data only - # Supports two modes: - # - field: Simple dot notation or JSONPath expression for extracting values - # - expression: CEL expression for computed values - # Only one of 'field' or 'expression' can be set per capture. - capture: - # Simple dot notation - - name: "clusterName" - field: "name" - - name: "readyConditionStatus" - expression: | - status.conditions.filter(c, c.type == "Ready").size() > 0 - ? status.conditions.filter(c, c.type == "Ready")[0].status - : "False" - - name: "generationId" - field: "generation" - - # JSONPath for complex extraction (filter by field value) - # See: https://kubernetes.io/docs/reference/kubectl/jsonpath/ - # - name: "lzNamespaceStatus" - # field: "{.items[?(@.adapter=='landing-zone-adapter')].data.namespace.status}" - - # CEL expression for computed values - # - name: "activeItemCount" - # expression: "items.filter(i, i.status == 'active').size()" - - # Conditions to check. SCOPE: Full execution context - # You can access: - # - Captured values: readyConditionStatus, clusterName, etc. - # - Full API response: clusterStatus.status.conditions, clusterStatus.spec.nodeCount - # - Params: clusterId, hyperfleetApiBaseUrl, etc. 
- conditions: - # Using captured value - - field: "readyConditionStatus" - operator: "equals" - value: "True" - - # Or dig directly into API response using precondition name - # - field: "clusterStatus.status.nodeCount" - # operator: "greaterThan" - # value: 0 - - # Alternative: CEL expression with full access - # expression: | - # clusterStatus.status.conditions.filter(c, c.type == "Ready")[0].status == "True" && - # clusterStatus.spec.nodeCount > 0 - - # ============================================================================ - # Resources (Create/Update Resources) - # ============================================================================ - # All resources are created/updated sequentially in the order defined below - resources: - # ========================================================================== - # Resource 1: Cluster Namespace - # ========================================================================== - - name: "clusterNamespace" - manifest: - apiVersion: v1 - kind: Namespace - metadata: - # Use | lower to ensure valid K8s resource name (lowercase RFC 1123) - name: "{{ .clusterId | lower }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "{{ .metadata.name }}" - hyperfleet.io/resource-type: "namespace" - annotations: - hyperfleet.io/created-by: "hyperfleet-adapter" - hyperfleet.io/generation: "{{ .generationId }}" - discovery: - # The "namespace" field within discovery is optional: - # - For namespaced resources: set namespace to target the specific namespace - # - For cluster-scoped resources (like Namespace, ClusterRole): omit or leave empty - # Here we omit it since Namespace is cluster-scoped - bySelectors: - labelSelector: - hyperfleet.io/resource-type: "namespace" - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/managed-by: "{{ .metadata.name }}" - - - # ============================================================================ - # Post-Processing - # ============================================================================ - post: - payloads: - # Build status payload inline - - name: "clusterStatusPayload" - build: - # Adapter name for tracking which adapter reported this status - adapter: "{{ .metadata.name }}" - - # Conditions array - each condition has type, status, reason, message - # Use CEL optional chaining ?.orValue() for safe field access - conditions: - # Applied: Resources successfully created - - type: "Applied" - status: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "True" : "False" - reason: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" - ? "NamespaceCreated" - : "NamespacePending" - message: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" - ? "Namespace created successfully" - : "Namespace creation in progress" - - # Available: Resources are active and ready - - type: "Available" - status: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "True" : "False" - reason: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "NamespaceReady" : "NamespaceNotReady" - message: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") == "Active" ? "Namespace is active and ready" : "Namespace is not active and ready" - - # Health: Adapter execution status (runtime) Don't need to update this. This can be reused from the adapter config. 
- - type: "Health" - status: - expression: | - adapter.?executionStatus.orValue("") == "success" ? "True" : (adapter.?executionStatus.orValue("") == "failed" ? "False" : "Unknown") - reason: - expression: | - adapter.?errorReason.orValue("") != "" ? adapter.?errorReason.orValue("") : "Healthy" - message: - expression: | - adapter.?errorMessage.orValue("") != "" ? adapter.?errorMessage.orValue("") : "All adapter operations completed successfully" - - # Use CEL expression for numeric fields to preserve type (not Go template which outputs strings) - observed_generation: - expression: "generationId" - - # Use Go template with now and date functions for timestamps - observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" - - # Optional data field for adapter-specific metrics extracted from resources - data: - namespace: - name: - expression: | - resources.?clusterNamespace.?metadata.?name.orValue("") - status: - expression: | - resources.?clusterNamespace.?status.?phase.orValue("") - - # ============================================================================ - # Post Actions - # ============================================================================ - # Post actions are executed after resources are created/updated - postActions: - # Report cluster status to HyperFleet API (always executed) - - name: "reportClusterStatus" - apiCall: - method: "POST" - # NOTE: API path includes /api/hyperfleet/ prefix and ends with /statuses - url: "{{ .hyperfleetApiBaseUrl }}/api/hyperfleet/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/statuses" - body: "{{ .clusterStatusPayload }}" - timeout: 30s - retryAttempts: 3 - retryBackoff: "exponential" - headers: - - name: "Content-Type" - value: "application/json" diff --git a/configs/broker-configmap-pubsub-template.yaml b/configs/broker-configmap-pubsub-template.yaml deleted file mode 100644 index 1fedc6d..0000000 --- a/configs/broker-configmap-pubsub-template.yaml +++ /dev/null @@ -1,187 +0,0 @@ -# Broker ConfigMap Template for Google Pub/Sub -# This ConfigMap provides broker configuration for the hyperfleet-adapter to consume CloudEvents from Google Pub/Sub -# -# Usage: -# 1. Copy this template and customize values for your environment -# 2. Apply to your Kubernetes cluster: kubectl apply -f broker-configmap-pubsub.yaml -# 3. Mount as a file in your adapter deployment (see example below) - -apiVersion: v1 -kind: ConfigMap -metadata: - name: hyperfleet-broker-config - namespace: hyperfleet-system - labels: - app.kubernetes.io/name: hyperfleet-adapter - app.kubernetes.io/component: broker-config - hyperfleet.io/broker-type: googlepubsub -data: - # ============================================================================ - # Broker Configuration (broker.yaml) - # ============================================================================ - # Note: Adapter broker topic/subscription are configured in the adapter - # deployment config under spec.clients.broker. 
- # This is the standard configuration format for hyperfleet-broker library - # Mount this file at /etc/broker/broker.yaml (or set BROKER_CONFIG_FILE) - # - # Note: You can override any setting using environment variables: - # BROKER_TYPE=googlepubsub - # BROKER_GOOGLEPUBSUB_PROJECT_ID=my-project - # SUBSCRIBER_PARALLELISM=5 - broker.yaml: | - # Set to true to log the loaded configuration on startup (useful for debugging) - log_config: false - - broker: - # Broker type: "rabbitmq" or "googlepubsub" - type: googlepubsub - - # Google Pub/Sub Configuration - googlepubsub: - # ==== Connection Settings (required) ==== - # GCP Project ID - project_id: "my-gcp-project" - - # ==== Subscription Settings ==== - # Time for subscriber to acknowledge message (10-600 seconds, default: 10) - ack_deadline_seconds: 60 - - # How long to retain unacknowledged messages (10m to 31d, default: 7d) - # Format: "Ns" (seconds), "Nm" (minutes), "Nh" (hours), "Nd" (days) - message_retention_duration: "604800s" # 7 days - - # Time of inactivity before subscription is deleted (min 1d, or 0 = never expire) - expiration_ttl: "2678400s" # 31 days - - # Enable ordered message delivery by ordering key - enable_message_ordering: false - - # ==== Retry Policy ==== - # Retry policy for failed message delivery (0s to 600s) - retry_min_backoff: "10s" - retry_max_backoff: "600s" - - # ==== Dead Letter Settings ==== - # Dead letter topic for messages that fail repeatedly - # If create_topic_if_missing is true, a dead letter topic named "{subscription_id}-dlq" - # will be created automatically - # dead_letter_topic: "my-dead-letter-topic" # Optional: customize dead letter topic name - dead_letter_max_attempts: 5 # 5-100, default: 5 - - # ==== Topic Settings ==== - # How long the topic retains messages for replay scenarios (0 = disabled) - # topic_retention_duration: "86400s" # 1 day - - # ==== Receive Settings (client-side flow control) ==== - max_outstanding_messages: 1000 - max_outstanding_bytes: 104857600 # 100MB - num_goroutines: 10 - - # ==== Behavior Flags ==== - # Default: false - infrastructure must exist (recommended for production) - # Set to true to automatically create topics/subscriptions if they don't exist - create_topic_if_missing: true - create_subscription_if_missing: true - - # Subscriber Configuration - subscriber: - # Number of parallel workers for processing messages (default: 1) - parallelism: 10 - ---- -# ============================================================================ -# Example Deployment -# ============================================================================ -# apiVersion: apps/v1 -# kind: Deployment -# metadata: -# name: hyperfleet-adapter -# namespace: hyperfleet-system -# spec: -# replicas: 1 -# selector: -# matchLabels: -# app: hyperfleet-adapter -# template: -# metadata: -# labels: -# app: hyperfleet-adapter -# spec: -# serviceAccountName: hyperfleet-adapter -# containers: -# - name: adapter -# image: quay.io/openshift-hyperfleet/hyperfleet-adapter:latest -# imagePullPolicy: Always -# env: -# # Adapter-specific configuration -# - name: BROKER_SUBSCRIPTION_ID -# valueFrom: -# configMapKeyRef: -# name: hyperfleet-broker-config -# key: BROKER_SUBSCRIPTION_ID -# - name: BROKER_TOPIC -# valueFrom: -# configMapKeyRef: -# name: hyperfleet-broker-config -# key: BROKER_TOPIC -# # Point to broker config file -# - name: BROKER_CONFIG_FILE -# value: /etc/broker/broker.yaml -# # Optional: Override broker.yaml settings with environment variables -# # - name: BROKER_GOOGLEPUBSUB_PROJECT_ID -# # 
value: "my-other-project" -# # - name: SUBSCRIBER_PARALLELISM -# # value: "5" -# volumeMounts: -# - name: broker-config -# mountPath: /etc/broker -# readOnly: true -# volumes: -# - name: broker-config -# configMap: -# name: hyperfleet-broker-config -# items: -# - key: broker.yaml -# path: broker.yaml - ---- -# ============================================================================ -# Optional: GCP Service Account Secret (if not using Workload Identity) -# ============================================================================ -# If running outside GKE or not using Workload Identity, create a secret -# with your GCP service account key: -# -# apiVersion: v1 -# kind: Secret -# metadata: -# name: gcp-service-account-key -# namespace: hyperfleet-system -# type: Opaque -# stringData: -# key.json: | -# { -# "type": "service_account", -# "project_id": "my-gcp-project", -# "private_key_id": "...", -# "private_key": "...", -# "client_email": "...", -# ... -# } -# -# Then mount it in your deployment: -# spec: -# template: -# spec: -# containers: -# - name: adapter -# env: -# - name: GOOGLE_APPLICATION_CREDENTIALS -# value: /var/secrets/google/key.json -# volumeMounts: -# - name: gcp-credentials -# mountPath: /var/secrets/google -# readOnly: true -# volumes: -# - name: gcp-credentials -# secret: -# secretName: gcp-service-account-key diff --git a/configs/templates/cluster-status-payload.yaml b/configs/templates/cluster-status-payload.yaml deleted file mode 100644 index c6b5f90..0000000 --- a/configs/templates/cluster-status-payload.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Cluster Status Payload Template -# Used for reporting cluster status back to HyperFleet API -status: "{{ .status }}" -message: "{{ .message }}" -observedGeneration: "{{ .generationSpec }}" -lastUpdated: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" -conditions: - - type: "Ready" - status: "{{ .readyStatus | default \"Unknown\" }}" - reason: "{{ .readyReason | default \"Pending\" }}" - message: "{{ .readyMessage | default \"Cluster status is being determined\" }}" - - type: "Configured" - status: "{{ .configuredStatus | default \"Unknown\" }}" - reason: "{{ .configuredReason | default \"Pending\" }}" - message: "{{ .configuredMessage | default \"Configuration is being applied\" }}" - diff --git a/configs/templates/deployment.yaml b/configs/templates/deployment.yaml deleted file mode 100644 index 4e76962..0000000 --- a/configs/templates/deployment.yaml +++ /dev/null @@ -1,37 +0,0 @@ -# Cluster Controller Deployment Template -apiVersion: apps/v1 -kind: Deployment -metadata: - name: "cluster-controller-{{ .clusterId }}" - namespace: "cluster-{{ .clusterId }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/component: "controller" -spec: - replicas: 1 - selector: - matchLabels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/component: "controller" - template: - metadata: - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/component: "controller" - spec: - containers: - - name: controller - image: "quay.io/hyperfleet/controller:{{ .imageTag }}" - env: - - name: CLUSTER_ID - value: "{{ .clusterId }}" - - name: RESOURCE_ID - value: "{{ .resourceId }}" - resources: - requests: - cpu: "100m" - memory: "128Mi" - limits: - cpu: "500m" - memory: "512Mi" - diff --git a/configs/templates/job.yaml b/configs/templates/job.yaml deleted file mode 100644 index 196dcfe..0000000 --- a/configs/templates/job.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Validation Job Template -# This job is 
used to validate cluster configuration -apiVersion: batch/v1 -kind: Job -metadata: - name: "validation-{{ .clusterId }}" - namespace: "cluster-{{ .clusterId }}" - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/job-type: "validation" - hyperfleet.io/resource-type: "job" - hyperfleet.io/managed-by: "{{ .metadata.name }}" -spec: - template: - metadata: - labels: - hyperfleet.io/cluster-id: "{{ .clusterId }}" - hyperfleet.io/job-type: "validation" - spec: - restartPolicy: Never - containers: - - name: validator - image: "quay.io/hyperfleet/validator:v1.0.0" - env: - - name: CLUSTER_ID - value: "{{ .clusterId }}" - - name: GENERATION_ID - value: "{{ .generationSpec }}" - diff --git a/internal/config_loader/constants.go b/internal/config_loader/constants.go index 4e3aa31..367c0ea 100644 --- a/internal/config_loader/constants.go +++ b/internal/config_loader/constants.go @@ -78,6 +78,15 @@ const ( FieldManifest = "manifest" FieldRecreateOnChange = "recreateOnChange" FieldDiscovery = "discovery" + FieldTransport = "transport" +) + +// Transport field names +const ( + FieldClient = "client" + FieldMaestro = "maestro" + FieldTargetCluster = "targetCluster" + FieldManifestWork = "manifestWork" ) // Manifest reference field names diff --git a/internal/config_loader/loader.go b/internal/config_loader/loader.go index 30d3650..2d21822 100644 --- a/internal/config_loader/loader.go +++ b/internal/config_loader/loader.go @@ -198,17 +198,26 @@ func loadTaskConfigFileReferences(config *AdapterTaskConfig, baseDir string) err for i := range config.Spec.Resources { resource := &config.Spec.Resources[i] ref := resource.GetManifestRef() - if ref == "" { - continue - } + if ref != "" { + content, err := loadYAMLFile(baseDir, ref) + if err != nil { + return fmt.Errorf("%s.%s[%d].%s.%s: %w", FieldSpec, FieldResources, i, FieldManifest, FieldRef, err) + } - content, err := loadYAMLFile(baseDir, ref) - if err != nil { - return fmt.Errorf("%s.%s[%d].%s.%s: %w", FieldSpec, FieldResources, i, FieldManifest, FieldRef, err) + // Replace manifest with loaded content + resource.Manifest = content } - // Replace manifest with loaded content - resource.Manifest = content + // Load transport.maestro.manifestWork.ref + if resource.Transport != nil && resource.Transport.Maestro != nil && resource.Transport.Maestro.ManifestWork != nil { + if resource.Transport.Maestro.ManifestWork.Ref != "" { + content, err := loadYAMLFile(baseDir, resource.Transport.Maestro.ManifestWork.Ref) + if err != nil { + return fmt.Errorf("%s.%s[%d].%s.%s.%s.%s: %w", FieldSpec, FieldResources, i, FieldTransport, FieldMaestro, FieldManifestWork, FieldRef, err) + } + resource.Transport.Maestro.ManifestWork.RefContent = content + } + } } // Load buildRef in spec.post.payloads diff --git a/internal/config_loader/struct_validator.go b/internal/config_loader/struct_validator.go index f1bb3ae..9276fee 100644 --- a/internal/config_loader/struct_validator.go +++ b/internal/config_loader/struct_validator.go @@ -2,7 +2,6 @@ package config_loader import ( "fmt" - "os" "reflect" "regexp" "strings" @@ -97,21 +96,25 @@ func validateOperator(fl validator.FieldLevel) bool { } // validateParameterEnvRequired is a struct-level validator for Parameter. -// Checks that required env params have their environment variables set. +// Checks that required params have their source starting with "event.", "env.", or "config.". 
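+// For example, a required param whose source is "event.id" or
+// "env.HYPERFLEET_API_BASE_URL" passes this check, while a source without one of
+// these prefixes (e.g. a bare "clusterId") is reported with the
+// "invalidsourceprefix" validation tag.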
func validateParameterEnvRequired(sl validator.StructLevel) { param := sl.Current().Interface().(Parameter) //nolint:errcheck // type is guaranteed by RegisterStructValidation - // Only validate if Required=true and Source starts with "env." - if !param.Required || !strings.HasPrefix(param.Source, "env.") { + if !param.Required { return } - envName := strings.TrimPrefix(param.Source, "env.") - envValue := os.Getenv(envName) + validPrefixes := []string{"event.", "env.", "config."} + hasValidPrefix := false + for _, prefix := range validPrefixes { + if strings.HasPrefix(param.Source, prefix) { + hasValidPrefix = true + break + } + } - // Error if env var is not set and no default provided - if envValue == "" && param.Default == nil { - sl.ReportError(param.Source, "source", "Source", "envrequired", envName) + if !hasValidPrefix { + sl.ReportError(param.Source, "source", "Source", "invalidsourceprefix", param.Source) } } diff --git a/internal/config_loader/types.go b/internal/config_loader/types.go index 09c541f..22da3e3 100644 --- a/internal/config_loader/types.go +++ b/internal/config_loader/types.go @@ -296,11 +296,49 @@ func (c *Condition) UnmarshalYAML(unmarshal func(interface{}) error) error { // Resource represents a Kubernetes resource configuration type Resource struct { Name string `yaml:"name" validate:"required,resourcename"` + Transport *TransportConfig `yaml:"transport,omitempty"` Manifest interface{} `yaml:"manifest,omitempty" validate:"required"` RecreateOnChange bool `yaml:"recreateOnChange,omitempty"` Discovery *DiscoveryConfig `yaml:"discovery,omitempty" validate:"required"` } +// TransportClientType represents the transport client type +type TransportClientType string + +const ( + // TransportClientKubernetes indicates direct Kubernetes API transport + TransportClientKubernetes TransportClientType = "kubernetes" + // TransportClientMaestro indicates Maestro ManifestWork transport + TransportClientMaestro TransportClientType = "maestro" +) + +// TransportConfig defines transport configuration for a resource +type TransportConfig struct { + Client TransportClientType `yaml:"client,omitempty" validate:"omitempty,oneof=kubernetes maestro"` + Maestro *MaestroTransportConfig `yaml:"maestro,omitempty"` +} + +// GetClientType returns the transport client type, defaulting to "kubernetes" +func (t *TransportConfig) GetClientType() TransportClientType { + if t == nil || t.Client == "" { + return TransportClientKubernetes + } + return t.Client +} + +// MaestroTransportConfig contains Maestro-specific transport settings +type MaestroTransportConfig struct { + TargetCluster string `yaml:"targetCluster" validate:"required"` + ManifestWork *ManifestWorkConfig `yaml:"manifestWork,omitempty"` +} + +// ManifestWorkConfig contains ManifestWork-specific settings +type ManifestWorkConfig struct { + Ref string `yaml:"ref,omitempty"` + RefContent map[string]interface{} `yaml:"-"` + Name string `yaml:"name,omitempty"` +} + // DiscoveryConfig represents resource discovery configuration type DiscoveryConfig struct { Namespace string `yaml:"namespace,omitempty"` diff --git a/internal/config_loader/validator.go b/internal/config_loader/validator.go index b2b7fca..00b1642 100644 --- a/internal/config_loader/validator.go +++ b/internal/config_loader/validator.go @@ -122,6 +122,16 @@ func (v *TaskConfigValidator) ValidateFileReferences() error { errors = append(errors, err.Error()) } } + + // Validate transport.maestro.manifestWork.ref in spec.resources + if resource.Transport != nil && 
resource.Transport.Maestro != nil && resource.Transport.Maestro.ManifestWork != nil { + if resource.Transport.Maestro.ManifestWork.Ref != "" { + path := fmt.Sprintf("%s.%s[%d].%s.%s.%s.%s", FieldSpec, FieldResources, i, FieldTransport, FieldMaestro, FieldManifestWork, FieldRef) + if err := v.validateFileExists(resource.Transport.Maestro.ManifestWork.Ref, path); err != nil { + errors = append(errors, err.Error()) + } + } + } } if len(errors) > 0 { @@ -173,6 +183,7 @@ func (v *TaskConfigValidator) ValidateSemantic() error { v.validateTemplateVariables() v.validateCELExpressions() v.validateK8sManifests() + v.validateTransportConfig() if v.errors.HasErrors() { return v.errors @@ -530,6 +541,39 @@ func (v *TaskConfigValidator) validateK8sManifest(manifest map[string]interface{ } } +// validateTransportConfig validates transport configuration for all resources +func (v *TaskConfigValidator) validateTransportConfig() { + for i, resource := range v.config.Spec.Resources { + if resource.Transport == nil { + continue + } + + basePath := fmt.Sprintf("%s.%s[%d].%s", FieldSpec, FieldResources, i, FieldTransport) + + // Validate maestro config is present when client=maestro + if resource.Transport.Client == TransportClientMaestro { + if resource.Transport.Maestro == nil { + v.errors.Add(basePath, "maestro configuration is required when client is 'maestro'") + continue + } + + // Validate targetCluster is present and validate its template variables + maestroPath := basePath + "." + FieldMaestro + if resource.Transport.Maestro.TargetCluster == "" { + v.errors.Add(maestroPath, "targetCluster is required") + } else { + // Validate template variables in targetCluster + v.validateTemplateString(resource.Transport.Maestro.TargetCluster, maestroPath+"."+FieldTargetCluster) + } + + // Validate manifestWork.name template if present + if resource.Transport.Maestro.ManifestWork != nil && resource.Transport.Maestro.ManifestWork.Name != "" { + v.validateTemplateString(resource.Transport.Maestro.ManifestWork.Name, maestroPath+"."+FieldManifestWork+"."+FieldName) + } + } + } +} + // ============================================================================= // HELPER FUNCTIONS // ============================================================================= diff --git a/internal/config_loader/validator_test.go b/internal/config_loader/validator_test.go index 0cabf31..50a1b5a 100644 --- a/internal/config_loader/validator_test.go +++ b/internal/config_loader/validator_test.go @@ -559,3 +559,131 @@ func TestFieldNameCachePopulated(t *testing.T) { }) } } + +func TestTransportConfigGetClientType(t *testing.T) { + t.Run("nil transport returns kubernetes", func(t *testing.T) { + var tc *TransportConfig + assert.Equal(t, TransportClientKubernetes, tc.GetClientType()) + }) + + t.Run("empty client returns kubernetes", func(t *testing.T) { + tc := &TransportConfig{} + assert.Equal(t, TransportClientKubernetes, tc.GetClientType()) + }) + + t.Run("kubernetes client returns kubernetes", func(t *testing.T) { + tc := &TransportConfig{Client: TransportClientKubernetes} + assert.Equal(t, TransportClientKubernetes, tc.GetClientType()) + }) + + t.Run("maestro client returns maestro", func(t *testing.T) { + tc := &TransportConfig{Client: TransportClientMaestro} + assert.Equal(t, TransportClientMaestro, tc.GetClientType()) + }) +} + +func TestValidateTransportConfig(t *testing.T) { + // Helper to create resource with transport config + withTransport := func(transport *TransportConfig) *AdapterTaskConfig { + cfg := baseTaskConfig() + 
diff --git a/internal/config_loader/validator_test.go b/internal/config_loader/validator_test.go
index 0cabf31..50a1b5a 100644
--- a/internal/config_loader/validator_test.go
+++ b/internal/config_loader/validator_test.go
@@ -559,3 +559,131 @@ func TestFieldNameCachePopulated(t *testing.T) {
 		})
 	}
 }
+
+func TestTransportConfigGetClientType(t *testing.T) {
+	t.Run("nil transport returns kubernetes", func(t *testing.T) {
+		var tc *TransportConfig
+		assert.Equal(t, TransportClientKubernetes, tc.GetClientType())
+	})
+
+	t.Run("empty client returns kubernetes", func(t *testing.T) {
+		tc := &TransportConfig{}
+		assert.Equal(t, TransportClientKubernetes, tc.GetClientType())
+	})
+
+	t.Run("kubernetes client returns kubernetes", func(t *testing.T) {
+		tc := &TransportConfig{Client: TransportClientKubernetes}
+		assert.Equal(t, TransportClientKubernetes, tc.GetClientType())
+	})
+
+	t.Run("maestro client returns maestro", func(t *testing.T) {
+		tc := &TransportConfig{Client: TransportClientMaestro}
+		assert.Equal(t, TransportClientMaestro, tc.GetClientType())
+	})
+}
+
+func TestValidateTransportConfig(t *testing.T) {
+	// Helper to create resource with transport config
+	withTransport := func(transport *TransportConfig) *AdapterTaskConfig {
+		cfg := baseTaskConfig()
+		cfg.Spec.Params = []Parameter{
+			{Name: "targetCluster", Source: "event.targetCluster"},
+		}
+		cfg.Spec.Resources = []Resource{{
+			Name:      "testResource",
+			Transport: transport,
+			Manifest: map[string]interface{}{
+				"apiVersion": "v1",
+				"kind":       "ConfigMap",
+				"metadata": map[string]interface{}{
+					"name":      "test-cm",
+					"namespace": "default",
+				},
+			},
+			Discovery: &DiscoveryConfig{
+				Namespace: "default",
+				ByName:    "test-cm",
+			},
+		}}
+		return cfg
+	}
+
+	t.Run("valid kubernetes transport", func(t *testing.T) {
+		cfg := withTransport(&TransportConfig{Client: TransportClientKubernetes})
+		v := newTaskValidator(cfg)
+		require.NoError(t, v.ValidateStructure())
+		require.NoError(t, v.ValidateSemantic())
+	})
+
+	t.Run("valid nil transport defaults to kubernetes", func(t *testing.T) {
+		cfg := withTransport(nil)
+		v := newTaskValidator(cfg)
+		require.NoError(t, v.ValidateStructure())
+		require.NoError(t, v.ValidateSemantic())
+	})
+
+	t.Run("valid maestro transport", func(t *testing.T) {
+		cfg := withTransport(&TransportConfig{
+			Client: TransportClientMaestro,
+			Maestro: &MaestroTransportConfig{
+				TargetCluster: "{{ .targetCluster }}",
+			},
+		})
+		v := newTaskValidator(cfg)
+		require.NoError(t, v.ValidateStructure())
+		require.NoError(t, v.ValidateSemantic())
+	})
+
+	t.Run("valid maestro transport with manifestWork name", func(t *testing.T) {
+		cfg := withTransport(&TransportConfig{
+			Client: TransportClientMaestro,
+			Maestro: &MaestroTransportConfig{
+				TargetCluster: "{{ .targetCluster }}",
+				ManifestWork: &ManifestWorkConfig{
+					Name: "work-{{ .targetCluster }}",
+				},
+			},
+		})
+		v := newTaskValidator(cfg)
+		require.NoError(t, v.ValidateStructure())
+		require.NoError(t, v.ValidateSemantic())
+	})
+
+	t.Run("invalid maestro transport missing maestro config", func(t *testing.T) {
+		cfg := withTransport(&TransportConfig{
+			Client: TransportClientMaestro,
+		})
+		v := newTaskValidator(cfg)
+		require.NoError(t, v.ValidateStructure())
+		// Semantic validation catches missing maestro config
+		err := v.ValidateSemantic()
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "maestro configuration is required")
+	})
+
+	t.Run("invalid maestro transport missing targetCluster", func(t *testing.T) {
+		cfg := withTransport(&TransportConfig{
+			Client:  TransportClientMaestro,
+			Maestro: &MaestroTransportConfig{},
+		})
+		v := newTaskValidator(cfg)
+		// Struct validation catches this via required tag on TargetCluster
+		err := v.ValidateStructure()
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "targetCluster")
+	})
+
+	t.Run("invalid maestro transport undefined template variable", func(t *testing.T) {
+		cfg := withTransport(&TransportConfig{
+			Client: TransportClientMaestro,
+			Maestro: &MaestroTransportConfig{
+				TargetCluster: "{{ .undefinedVar }}",
+			},
+		})
+		v := newTaskValidator(cfg)
+		require.NoError(t, v.ValidateStructure())
+		err := v.ValidateSemantic()
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "undefined template variable")
+	})
+}
"github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" pkgotel "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/otel" "go.opentelemetry.io/otel" @@ -340,6 +341,12 @@ func (b *ExecutorBuilder) WithK8sClient(client k8s_client.K8sClient) *ExecutorBu return b } +// WithMaestroClient sets the Maestro ManifestWork client +func (b *ExecutorBuilder) WithMaestroClient(client maestro_client.ManifestWorkClient) *ExecutorBuilder { + b.config.MaestroClient = client + return b +} + // WithLogger sets the logger func (b *ExecutorBuilder) WithLogger(log logger.Logger) *ExecutorBuilder { b.config.Logger = log diff --git a/internal/executor/resource_executor.go b/internal/executor/resource_executor.go index 4e398ca..9ec9984 100644 --- a/internal/executor/resource_executor.go +++ b/internal/executor/resource_executor.go @@ -10,25 +10,31 @@ import ( "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/generation" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/maestro_client" "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/constants" "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + workv1 "open-cluster-management.io/api/work/v1" + "sigs.k8s.io/yaml" ) // ResourceExecutor creates and updates Kubernetes resources type ResourceExecutor struct { - k8sClient k8s_client.K8sClient - log logger.Logger + k8sClient k8s_client.K8sClient + maestroClient maestro_client.ManifestWorkClient + log logger.Logger } // newResourceExecutor creates a new resource executor // NOTE: Caller (NewExecutor) is responsible for config validation func newResourceExecutor(config *ExecutorConfig) *ResourceExecutor { return &ResourceExecutor{ - k8sClient: config.K8sClient, - log: config.Logger, + k8sClient: config.K8sClient, + maestroClient: config.MaestroClient, + log: config.Logger, } } @@ -81,14 +87,25 @@ func (re *ResourceExecutor) executeResource(ctx context.Context, resource config re.log.Debugf(ctx, "Resource[%s] manifest built: namespace=%s", resource.Name, manifest.GetNamespace()) - // Step 2: Delegate to applyResource which handles discovery, generation comparison, and operations - return re.applyResource(ctx, resource, manifest, execCtx) + // Step 2: Check transport type and route to appropriate client + clientType := resource.Transport.GetClientType() + + switch clientType { + case config_loader.TransportClientKubernetes: + return re.applyResourceK8s(ctx, resource, manifest, execCtx) + case config_loader.TransportClientMaestro: + return re.applyResourceMaestro(ctx, resource, manifest, execCtx) + default: + result.Status = StatusFailed + result.Error = fmt.Errorf("unsupported transport client: %s", clientType) + return result, NewExecutorError(PhaseResources, resource.Name, "unsupported transport client", result.Error) + } } -// applyResource handles resource discovery, generation comparison, and execution of operations. +// applyResourceK8s handles resource discovery, generation comparison, and execution of operations via Kubernetes API. // It discovers existing resources (via Discovery config or by name), compares generations, // and performs the appropriate operation (create, update, recreate, or skip). 
diff --git a/internal/executor/resource_executor.go b/internal/executor/resource_executor.go
index 4e398ca..9ec9984 100644
--- a/internal/executor/resource_executor.go
+++ b/internal/executor/resource_executor.go
@@ -10,25 +10,31 @@ import (
 	"github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader"
 	"github.com/openshift-hyperfleet/hyperfleet-adapter/internal/generation"
 	"github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client"
+	"github.com/openshift-hyperfleet/hyperfleet-adapter/internal/maestro_client"
 	"github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/constants"
 	"github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	workv1 "open-cluster-management.io/api/work/v1"
+	"sigs.k8s.io/yaml"
 )
 
 // ResourceExecutor creates and updates Kubernetes resources
 type ResourceExecutor struct {
-	k8sClient k8s_client.K8sClient
-	log       logger.Logger
+	k8sClient     k8s_client.K8sClient
+	maestroClient maestro_client.ManifestWorkClient
+	log           logger.Logger
 }
 
 // newResourceExecutor creates a new resource executor
 // NOTE: Caller (NewExecutor) is responsible for config validation
 func newResourceExecutor(config *ExecutorConfig) *ResourceExecutor {
 	return &ResourceExecutor{
-		k8sClient: config.K8sClient,
-		log:       config.Logger,
+		k8sClient:     config.K8sClient,
+		maestroClient: config.MaestroClient,
+		log:           config.Logger,
 	}
 }
 
@@ -81,14 +87,25 @@ func (re *ResourceExecutor) executeResource(ctx context.Context, resource config_loader.Resource,
 	re.log.Debugf(ctx, "Resource[%s] manifest built: namespace=%s", resource.Name, manifest.GetNamespace())
 
-	// Step 2: Delegate to applyResource which handles discovery, generation comparison, and operations
-	return re.applyResource(ctx, resource, manifest, execCtx)
+	// Step 2: Check transport type and route to appropriate client
+	clientType := resource.Transport.GetClientType()
+
+	switch clientType {
+	case config_loader.TransportClientKubernetes:
+		return re.applyResourceK8s(ctx, resource, manifest, execCtx)
+	case config_loader.TransportClientMaestro:
+		return re.applyResourceMaestro(ctx, resource, manifest, execCtx)
+	default:
+		result.Status = StatusFailed
+		result.Error = fmt.Errorf("unsupported transport client: %s", clientType)
+		return result, NewExecutorError(PhaseResources, resource.Name, "unsupported transport client", result.Error)
+	}
 }
 
-// applyResource handles resource discovery, generation comparison, and execution of operations.
+// applyResourceK8s handles resource discovery, generation comparison, and execution of operations via Kubernetes API.
 // It discovers existing resources (via Discovery config or by name), compares generations,
 // and performs the appropriate operation (create, update, recreate, or skip).
-func (re *ResourceExecutor) applyResource(ctx context.Context, resource config_loader.Resource, manifest *unstructured.Unstructured, execCtx *ExecutionContext) (ResourceResult, error) {
+func (re *ResourceExecutor) applyResourceK8s(ctx context.Context, resource config_loader.Resource, manifest *unstructured.Unstructured, execCtx *ExecutionContext) (ResourceResult, error) {
 	result := ResourceResult{
 		Name:      resource.Name,
 		Kind:      manifest.GetKind(),
@@ -183,6 +200,10 @@ func (re *ResourceExecutor) applyResource(ctx context.Context, resource config_loader.Resource,
 		errCtx = logger.WithErrorField(errCtx, err)
 		re.log.Errorf(errCtx, "Resource[%s] processed: operation=%s reason=%s", resource.Name, result.Operation, result.OperationReason)
+		// Log the full manifest for debugging
+		if manifestYAML, marshalErr := yaml.Marshal(manifest.Object); marshalErr == nil {
+			re.log.Debugf(errCtx, "Resource[%s] failed manifest:\n%s", resource.Name, string(manifestYAML))
+		}
 		return result, NewExecutorError(PhaseResources, resource.Name, fmt.Sprintf("failed to %s resource", result.Operation), err)
 	}
 
@@ -199,6 +220,283 @@
 	return result, nil
 }
 
+// applyResourceMaestro handles resource delivery via Maestro ManifestWork.
+// It builds a ManifestWork containing the manifest and applies it to the target cluster.
+func (re *ResourceExecutor) applyResourceMaestro(ctx context.Context, resource config_loader.Resource, manifest *unstructured.Unstructured, execCtx *ExecutionContext) (ResourceResult, error) {
+	result := ResourceResult{
+		Name:         resource.Name,
+		Kind:         manifest.GetKind(),
+		Namespace:    manifest.GetNamespace(),
+		ResourceName: manifest.GetName(),
+		Status:       StatusSuccess,
+	}
+
+	// Validate maestro client is configured
+	if re.maestroClient == nil {
+		result.Status = StatusFailed
+		result.Error = fmt.Errorf("maestro client not configured")
+		return result, NewExecutorError(PhaseResources, resource.Name, "maestro client not configured", result.Error)
+	}
+
+	// Validate maestro transport config is present
+	if resource.Transport == nil || resource.Transport.Maestro == nil {
+		result.Status = StatusFailed
+		result.Error = fmt.Errorf("maestro transport configuration missing")
+		return result, NewExecutorError(PhaseResources, resource.Name, "maestro transport configuration missing", result.Error)
+	}
+
+	maestroConfig := resource.Transport.Maestro
+
+	// Render targetCluster template
+	targetCluster, err := renderTemplate(maestroConfig.TargetCluster, execCtx.Params)
+	if err != nil {
+		result.Status = StatusFailed
+		result.Error = fmt.Errorf("failed to render targetCluster template: %w", err)
+		return result, NewExecutorError(PhaseResources, resource.Name, "failed to render targetCluster template", err)
+	}
+
+	re.log.Debugf(ctx, "Resource[%s] using Maestro transport to cluster=%s", resource.Name, targetCluster)
+
+	// Build ManifestWork from manifest
+	work, err := re.buildManifestWork(ctx, resource, manifest, execCtx)
+	if err != nil {
+		result.Status = StatusFailed
+		result.Error = err
+		return result, NewExecutorError(PhaseResources, resource.Name, "failed to build ManifestWork", err)
+	}
+
+	re.log.Infof(ctx, "Resource[%s] applying ManifestWork via Maestro: name=%s targetCluster=%s",
+		resource.Name, work.Name, targetCluster)
+
+	// Apply ManifestWork using maestro client
+	appliedWork, err := re.maestroClient.ApplyManifestWork(ctx, targetCluster, work)
+	if err != nil {
+		result.Status = StatusFailed
+		result.Error = err
+		// Set ExecutionError for Maestro operation failure
+		execCtx.Adapter.ExecutionError = &ExecutionError{
+			Phase:   string(PhaseResources),
+			Step:    resource.Name,
+			Message: err.Error(),
+		}
+		errCtx := logger.WithK8sResult(ctx, "FAILED")
+		errCtx = logger.WithErrorField(errCtx, err)
+		re.log.Errorf(errCtx, "Resource[%s] ManifestWork apply failed: name=%s targetCluster=%s",
+			resource.Name, work.Name, targetCluster)
+		// Log the full manifest for debugging
+		if manifestYAML, marshalErr := yaml.Marshal(manifest.Object); marshalErr == nil {
+			re.log.Debugf(errCtx, "Resource[%s] failed manifest:\n%s", resource.Name, string(manifestYAML))
+		}
+		// Also log the ManifestWork for debugging
+		if workYAML, marshalErr := yaml.Marshal(work); marshalErr == nil {
+			re.log.Debugf(errCtx, "Resource[%s] failed ManifestWork:\n%s", resource.Name, string(workYAML))
+		}
+		return result, NewExecutorError(PhaseResources, resource.Name, "failed to apply ManifestWork", err)
+	}
+
+	// Set operation info
+	result.Operation = generation.OperationCreate // ManifestWork apply is always an upsert
+	result.OperationReason = fmt.Sprintf("ManifestWork applied to cluster %s", targetCluster)
+
+	successCtx := logger.WithK8sResult(ctx, "SUCCESS")
+	re.log.Infof(successCtx, "Resource[%s] ManifestWork applied: name=%s targetCluster=%s resourceVersion=%s",
+		resource.Name, appliedWork.Name, targetCluster, appliedWork.ResourceVersion)
+
+	// Store the original manifest in execution context (not the ManifestWork)
+	// This allows post-processing to reference the resource by name
+	execCtx.Resources[resource.Name] = manifest
+	re.log.Debugf(ctx, "Resource stored in context as '%s'", resource.Name)
+
+	return result, nil
+}
+
+// buildManifestWork creates a ManifestWork containing the given manifest
+func (re *ResourceExecutor) buildManifestWork(ctx context.Context, resource config_loader.Resource, manifest *unstructured.Unstructured, execCtx *ExecutionContext) (*workv1.ManifestWork, error) {
+	maestroConfig := resource.Transport.Maestro
+
+	re.log.Debugf(ctx, "Building ManifestWork for resource[%s]: manifestWork=%v", resource.Name, maestroConfig.ManifestWork)
+
+	// Determine ManifestWork name
+	workName := ""
+	if maestroConfig.ManifestWork != nil && maestroConfig.ManifestWork.Name != "" {
+		// Use configured name (with template rendering)
+		var err error
+		workName, err = renderTemplate(maestroConfig.ManifestWork.Name, execCtx.Params)
+		if err != nil {
+			return nil, fmt.Errorf("failed to render manifestWork.name template: %w", err)
+		}
+		re.log.Debugf(ctx, "Using configured ManifestWork name: %s", workName)
+	} else {
+		// Generate name from resource name and manifest name
+		workName = fmt.Sprintf("%s-%s", resource.Name, manifest.GetName())
+		re.log.Debugf(ctx, "Generated ManifestWork name: %s", workName)
+	}
+
+	// Convert manifest to runtime.RawExtension
+	manifestBytes, err := manifest.MarshalJSON()
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal manifest: %w", err)
+	}
+
+	work := &workv1.ManifestWork{
+		Spec: workv1.ManifestWorkSpec{
+			Workload: workv1.ManifestsTemplate{
+				Manifests: []workv1.Manifest{
+					{
+						RawExtension: runtime.RawExtension{
+							Raw: manifestBytes,
+						},
+					},
+				},
+			},
+		},
+	}
+	work.SetName(workName)
+
+	// Copy the generation annotation from the manifest to the ManifestWork
+	// This is required by the maestro client for generation-based idempotency
+	manifestAnnotations := manifest.GetAnnotations()
+	if manifestAnnotations != nil {
+		if gen, ok := manifestAnnotations[constants.AnnotationGeneration]; ok {
+			work.SetAnnotations(map[string]string{
+				constants.AnnotationGeneration: gen,
+			})
+			re.log.Debugf(ctx, "Set ManifestWork generation annotation: %s", gen)
+		}
+	}
+
+	// Apply any additional settings from manifestWork.refContent if present
+	if maestroConfig.ManifestWork != nil && maestroConfig.ManifestWork.RefContent != nil {
+		re.log.Debugf(ctx, "Applying ManifestWork settings from RefContent: %v", maestroConfig.ManifestWork.RefContent)
+		if err := re.applyManifestWorkSettings(ctx, work, maestroConfig.ManifestWork.RefContent, execCtx.Params); err != nil {
+			return nil, fmt.Errorf("failed to apply manifestWork settings: %w", err)
+		}
+	} else if maestroConfig.ManifestWork != nil {
+		re.log.Debugf(ctx, "ManifestWork config present but RefContent is nil (Ref=%s)", maestroConfig.ManifestWork.Ref)
+	}
+
+	return work, nil
+}
+
+// applyManifestWorkSettings applies settings from the manifestWork ref file to the ManifestWork.
+// The ref file can contain metadata (labels, annotations) and spec fields.
+// Template variables in string values are rendered using the provided params.
+func (re *ResourceExecutor) applyManifestWorkSettings(ctx context.Context, work *workv1.ManifestWork, settings map[string]interface{}, params map[string]interface{}) error {
+	re.log.Debugf(ctx, "Applying ManifestWork settings: keys=%v", getMapKeys(settings))
+
+	// Apply metadata if present
+	if metadata, ok := settings["metadata"].(map[string]interface{}); ok {
+		re.log.Debugf(ctx, "Found metadata in settings: %v", metadata)
+
+		// Apply labels from metadata
+		if labels, ok := metadata["labels"].(map[string]interface{}); ok {
+			labelMap := make(map[string]string)
+			for k, v := range labels {
+				if str, ok := v.(string); ok {
+					rendered, err := renderTemplate(str, params)
+					if err != nil {
+						return fmt.Errorf("failed to render label value for key %s: %w", k, err)
+					}
+					labelMap[k] = rendered
+				}
+			}
+			work.SetLabels(labelMap)
+			re.log.Debugf(ctx, "Applied labels: %v", labelMap)
+		}
+
+		// Apply annotations from metadata
+		if annotations, ok := metadata["annotations"].(map[string]interface{}); ok {
+			annotationMap := make(map[string]string)
+			for k, v := range annotations {
+				if str, ok := v.(string); ok {
+					rendered, err := renderTemplate(str, params)
+					if err != nil {
+						return fmt.Errorf("failed to render annotation value for key %s: %w", k, err)
+					}
+					annotationMap[k] = rendered
+				}
+			}
+			work.SetAnnotations(annotationMap)
+			re.log.Debugf(ctx, "Applied annotations: %v", annotationMap)
+		}
+	}
+
+	// Also check for labels/annotations at root level (backwards compatibility)
+	if labels, ok := settings["labels"].(map[string]interface{}); ok {
+		labelMap := make(map[string]string)
+		for k, v := range labels {
+			if str, ok := v.(string); ok {
+				rendered, err := renderTemplate(str, params)
+				if err != nil {
+					return fmt.Errorf("failed to render label value for key %s: %w", k, err)
+				}
+				labelMap[k] = rendered
+			}
+		}
+		// Merge with existing labels
+		existing := work.GetLabels()
+		if existing == nil {
+			existing = make(map[string]string)
+		}
+		for k, v := range labelMap {
+			existing[k] = v
+		}
+		work.SetLabels(existing)
+	}
+
+	if annotations, ok := settings["annotations"].(map[string]interface{}); ok {
+		annotationMap := make(map[string]string)
+		for k, v := range annotations {
+			if str, ok := v.(string); ok {
+				rendered, err := renderTemplate(str, params)
+				if err != nil {
+					return fmt.Errorf("failed to render annotation value for key %s: %w", k, err)
+				}
+				annotationMap[k] = rendered
+			}
+		}
+		// Merge with existing annotations
+		existing := work.GetAnnotations()
+		if existing == nil {
+			existing = make(map[string]string)
+		}
+		for k, v := range annotationMap {
+			existing[k] = v
+		}
+		work.SetAnnotations(existing)
+	}
+
+	// Apply spec fields if present
+	if spec, ok := settings["spec"].(map[string]interface{}); ok {
+		re.log.Debugf(ctx, "Found spec in settings: keys=%v", getMapKeys(spec))
+
+		// Apply deleteOption if present
+		if deleteOption, ok := spec["deleteOption"].(map[string]interface{}); ok {
+			if propagationPolicy, ok := deleteOption["propagationPolicy"].(string); ok {
+				work.Spec.DeleteOption = &workv1.DeleteOption{
+					PropagationPolicy: workv1.DeletePropagationPolicyType(propagationPolicy),
+				}
+				re.log.Debugf(ctx, "Applied deleteOption.propagationPolicy: %s", propagationPolicy)
+			}
+		}
+
+		// Note: manifestConfigs and other complex spec fields would need
+		// proper type conversion. For now, we handle the most common cases.
+		// Additional spec fields can be added as needed.
+	}
+
+	return nil
+}
+
+// getMapKeys returns the keys of a map for debugging
+func getMapKeys(m map[string]interface{}) []string {
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
 // buildManifest builds an unstructured manifest from the resource configuration
 func (re *ResourceExecutor) buildManifest(ctx context.Context, resource config_loader.Resource, execCtx *ExecutionContext) (*unstructured.Unstructured, error) {
 	var manifestData map[string]interface{}
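For reference, a sketch of the RefContent shape that applyManifestWorkSettings consumes. Only metadata or root-level labels and annotations plus spec.deleteOption.propagationPolicy are applied today; the key values shown are illustrative:

    settings := map[string]interface{}{
        "metadata": map[string]interface{}{
            "labels": map[string]interface{}{
                "hyperfleet.io/cluster-id": "{{ .clusterId }}", // rendered against params
            },
        },
        "spec": map[string]interface{}{
            "deleteOption": map[string]interface{}{
                "propagationPolicy": "Orphan", // becomes workv1.DeletePropagationPolicyType
            },
        },
    }
    // err := re.applyManifestWorkSettings(ctx, work, settings, execCtx.Params)
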
diff --git a/internal/executor/types.go b/internal/executor/types.go
index 72f1f94..33889c1 100644
--- a/internal/executor/types.go
+++ b/internal/executor/types.go
@@ -10,6 +10,7 @@ import (
 	"github.com/openshift-hyperfleet/hyperfleet-adapter/internal/generation"
 	"github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api"
 	"github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client"
+	"github.com/openshift-hyperfleet/hyperfleet-adapter/internal/maestro_client"
 	"github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 )
@@ -62,6 +63,8 @@ type ExecutorConfig struct {
 	APIClient hyperfleet_api.Client
 	// K8sClient is the Kubernetes client
 	K8sClient k8s_client.K8sClient
+	// MaestroClient is the Maestro ManifestWork client (optional; required only when a resource uses maestro transport)
+	MaestroClient maestro_client.ManifestWorkClient
 	// Logger is the logger instance
 	Logger logger.Logger
 }
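Finally, a minimal test fake, assuming ApplyManifestWork(ctx, targetCluster, work) is the only ManifestWorkClient method exercised by this patch; if the real maestro_client interface declares more methods, the fake needs those too:

    type fakeManifestWorkClient struct {
        applied []*workv1.ManifestWork
    }

    func (f *fakeManifestWorkClient) ApplyManifestWork(ctx context.Context, targetCluster string, work *workv1.ManifestWork) (*workv1.ManifestWork, error) {
        f.applied = append(f.applied, work)
        out := work.DeepCopy()
        out.ResourceVersion = "1" // mimic a server-assigned resourceVersion
        return out, nil
    }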