diff --git a/.gitignore b/.gitignore index 666a71a..949e4ab 100644 --- a/.gitignore +++ b/.gitignore @@ -39,11 +39,13 @@ vendor/ *.tmp *.bak *.log +.deploy-work/ # Environment files .env .env.local .env.*.local +deploy-scripts/.env # Claude Code files .claude/ diff --git a/deploy-scripts/.env.example b/deploy-scripts/.env.example new file mode 100644 index 0000000..f5d27d8 --- /dev/null +++ b/deploy-scripts/.env.example @@ -0,0 +1,79 @@ +# ============================================================================ +# CLM Components Deployment Configuration +# ============================================================================ +# This file contains environment variables used by deploy-clm.sh +# Copy this file to .env and modify values as needed for your environment: +# cp .env.example .env + +# Kubernetes Configuration +NAMESPACE=hyperfleet-e2e + +# Release Configuration +RELEASE_PREFIX=hyperfleet + +# Provider Configuration +GCP_PROJECT_ID=hcm-hyperfleet + +# Image Registry Configuration +IMAGE_REGISTRY=registry.ci.openshift.org/ci + +# API Component Configuration +API_IMAGE_REPO=hyperfleet-api +API_IMAGE_TAG=latest +API_SERVICE_TYPE=LoadBalancer + +# API Adapter Configuration (comma-separated list) +# NOTE: Adapters are auto-discovered from testdata/adapter-configs/ +# Only set these if you want to override the auto-discovery +# Example: API_ADAPTERS_CLUSTER="example1-namespace,validation,dns" +# API_ADAPTERS_CLUSTER="" +# API_ADAPTERS_NODEPOOL="" + +# Sentinel Component Configuration +SENTINEL_IMAGE_REPO=hyperfleet-sentinel +SENTINEL_IMAGE_TAG=latest +SENTINEL_BROKER_TYPE=googlepubsub +SENTINEL_GOOGLEPUBSUB_CREATE_TOPIC_IF_MISSING=true + +# Adapter Component Configuration +ADAPTER_IMAGE_REPO=hyperfleet-adapter +ADAPTER_IMAGE_TAG=latest +ADAPTER_GOOGLEPUBSUB_CREATE_TOPIC_IF_MISSING=true +ADAPTER_GOOGLEPUBSUB_CREATE_SUBSCRIPTION_IF_MISSING=true + +# Adapter Pub/Sub Configuration (Optional) +# If not set, these will be auto-generated based on namespace and resource type: +# ADAPTER_SUBSCRIPTION_ID: ${NAMESPACE}-${resource_type}-${adapter_name} +# ADAPTER_TOPIC: ${NAMESPACE}-${resource_type} +# ADAPTER_DEAD_LETTER_TOPIC: ${NAMESPACE}-${resource_type}-dlq +# Uncomment and set these to override the auto-generated values: +# ADAPTER_SUBSCRIPTION_ID="" +# ADAPTER_TOPIC="" +# ADAPTER_DEAD_LETTER_TOPIC="" + +# HyperFleet API Configuration +# Note: If namespace is different, use: http://hyperfleet-api.${NAMESPACE}.svc.cluster.local:8000 +API_BASE_URL=http://hyperfleet-api:8000 + + +# Helm Chart Sources +API_CHART_REPO=https://github.com/openshift-hyperfleet/hyperfleet-api.git +API_CHART_REF=main +API_CHART_PATH=charts + +SENTINEL_CHART_REPO=https://github.com/openshift-hyperfleet/hyperfleet-sentinel.git +SENTINEL_CHART_REF=main +SENTINEL_CHART_PATH=deployments/helm/sentinel + +ADAPTER_CHART_REPO=https://github.com/openshift-hyperfleet/hyperfleet-adapter.git +ADAPTER_CHART_REF=main +ADAPTER_CHART_PATH=charts + +# Component Selection Flags (true/false) +INSTALL_API=true +INSTALL_SENTINEL=true +INSTALL_ADAPTER=true + +# Execution Options +DRY_RUN=false +VERBOSE=false diff --git a/deploy-scripts/README.md b/deploy-scripts/README.md new file mode 100644 index 0000000..5a537c7 --- /dev/null +++ b/deploy-scripts/README.md @@ -0,0 +1,341 @@ +# CLM Deployment Scripts + +Automated deployment scripts for HyperFleet CLM (Cluster Lifecycle Management) components. 
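+At a glance, the pieces added in this change are organized as follows (paths as they appear in this diff; the one-line summaries are informal descriptions, not authoritative docs):
+
+```text
+deploy-scripts/
+├── deploy-clm.sh        # entry point: install/uninstall CLM components
+├── .env.example         # template to copy to deploy-scripts/.env
+├── README.md            # this document
+└── lib/
+    ├── common.sh        # logging, dependency checks, pod health verification
+    ├── helm.sh          # sparse clone of component Helm charts
+    ├── api.sh           # API install/uninstall
+    ├── sentinel.sh      # Sentinel install/uninstall (clusters and nodepools)
+    └── adapter.sh       # adapter discovery, install/uninstall
+testdata/
+└── adapter-configs/
+    └── clusters-example1-namespace/   # example adapter configuration
+```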
+ +## Overview + +The `deploy-clm.sh` script automates the installation and uninstallation of HyperFleet CLM components (API, Sentinel, and Adapters) using Helm for E2E testing environments. It provides a consistent and repeatable deployment process with comprehensive validation and error handling. + +## Features + +- **Install/Uninstall Operations**: Deploy or remove all CLM components with a single command +- **Configurable Components**: Install all components or selectively skip specific ones +- **Image Customization**: Configure custom image repositories and tags for each component +- **Helm Chart Management**: Automatically clone and use Helm charts from component repositories +- **Pod Health Verification**: Automatic verification of pod health after deployment with failure detection (CrashLoopBackOff, ImagePullBackOff, etc.) +- **Namespace Lifecycle**: Automatic namespace creation and deletion +- **Infrastructure Validation**: Pre-deployment checks for cluster readiness +- **Dry-Run Support**: Test deployment without making changes +- **Verbose Logging**: Detailed logging for troubleshooting +- **Error Handling**: Comprehensive validation and graceful error handling with automatic log retrieval on failures + +## Prerequisites + +The script requires the following tools to be installed: + +- `kubectl` - Kubernetes command-line tool +- `helm` - Helm package manager (v3+) +- `git` - Git version control + +Ensure you have: +- Valid kubeconfig with access to target cluster +- Appropriate RBAC permissions for namespace and resource management +- Network access to component Git repositories and image registries + +## Quick Start + +### Option 1: Using Command-Line Flags (Simple) + +Install all components with default settings: + +```bash +./deploy-scripts/deploy-clm.sh --action install --namespace hyperfleet-e2e +``` + +Install with custom image tags: + +```bash +./deploy-scripts/deploy-clm.sh --action install \ + --namespace my-test-env \ + --api-image-tag v1.2.0 \ + --sentinel-image-tag v1.2.0 \ + --adapter-image-tag v1.2.0 +``` + +Uninstall all components: + +```bash +./deploy-scripts/deploy-clm.sh --action uninstall --namespace hyperfleet-e2e +``` + +### Option 2: Using .env File (Recommended for Complex Configurations) + +For easier management of deployment parameters, use a `.env` file: + +1. **Copy the example configuration:** + ```bash + cd deploy-scripts/ + cp .env.example .env + ``` + +2. **Edit `.env` with your settings:** + ```bash + vim .env # or your preferred editor + ``` + + Key parameters you can configure: + - `NAMESPACE` - Kubernetes namespace (default: `hyperfleet-e2e`) + - `IMAGE_REGISTRY` - Container image registry + - `API_IMAGE_TAG`, `SENTINEL_IMAGE_TAG`, `ADAPTER_IMAGE_TAG` - Image tags + - `GCP_PROJECT_ID` - Google Cloud Project ID for Pub/Sub + - `INSTALL_API`, `INSTALL_SENTINEL`, `INSTALL_ADAPTER` - Component selection + + See [.env.example](.env.example) for all available parameters. + +3. **Run the deployment:** + ```bash + ./deploy-clm.sh --action install + ``` + +**Configuration Priority:** +- Command-line flags override .env file values +- .env file values override script defaults +- This allows baseline config in `.env` with per-run overrides via flags + +## Command-Line Reference + +For basic usage, see [Quick Start](#quick-start) section above. 
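+Command-line flags always win over `.env` values for a given run. For example, with `NAMESPACE=hyperfleet-e2e` set in `.env`, the flag below still takes effect for this invocation only (the namespace name is purely illustrative):
+
+```bash
+# .env provides the baseline; the flag overrides it for this run
+./deploy-scripts/deploy-clm.sh --action install --namespace scratch-env --dry-run
+```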
+### Basic Syntax
+
+```bash
+./deploy-scripts/deploy-clm.sh --action <install|uninstall> [OPTIONS]
+```
+
+### Required Flags
+
+| Flag | Description |
+|------|-------------|
+| `--action <action>` | Action to perform: `install` or `uninstall` |
+
+### Optional Flags
+
+#### General Options
+
+| Flag | Description | Default |
+|------|-------------|---------|
+| `--namespace <namespace>` | Kubernetes namespace for deployment | `hyperfleet-e2e` |
+| `--dry-run` | Print commands without executing | `false` |
+| `--verbose` | Enable verbose logging | `false` |
+| `--help` | Show help message | - |
+
+#### Component Selection
+
+| Flag | Description |
+|------|-------------|
+| `--skip-api` | Skip API component installation |
+| `--skip-sentinel` | Skip Sentinel component installation |
+| `--skip-adapter` | Skip Adapter component installation |
+
+#### Image Configuration
+
+| Flag | Description | Default |
+|------|-------------|---------|
+| `--image-registry <registry>` | Image registry for all components | `registry.ci.openshift.org/ci` |
+| `--api-image-repo <repo>` | API image repository (without registry) | `hyperfleet-api` |
+| `--api-image-tag <tag>` | API image tag | `latest` |
+| `--sentinel-image-repo <repo>` | Sentinel image repository (without registry) | `hyperfleet-sentinel` |
+| `--sentinel-image-tag <tag>` | Sentinel image tag | `latest` |
+| `--adapter-image-repo <repo>` | Adapter image repository (without registry) | `hyperfleet-adapter` |
+| `--adapter-image-tag <tag>` | Adapter image tag | `latest` |
+
+**Notes**:
+- Helm chart sources are fixed and pulled from the official component repositories at the `main` branch
+- Final image path format: `${IMAGE_REGISTRY}/${IMAGE_REPO}:${IMAGE_TAG}`
+- Example: `registry.ci.openshift.org/ci/hyperfleet-api:latest`
+
+## Examples
+
+### Installation Examples
+
+#### 1. Install with Default Settings
+
+```bash
+./deploy-scripts/deploy-clm.sh --action install --namespace hyperfleet-e2e
+```
+
+This installs all three components (API, Sentinel, Adapter) with default configurations.
+
+#### 2. Install Only API and Sentinel
+
+```bash
+./deploy-scripts/deploy-clm.sh --action install \
+  --namespace test-env \
+  --skip-adapter
+```
+
+#### 3. Install with Custom Image Tags
+
+```bash
+./deploy-scripts/deploy-clm.sh --action install \
+  --namespace staging \
+  --api-image-tag v1.2.0 \
+  --sentinel-image-tag v1.2.0 \
+  --adapter-image-tag v1.2.0
+```
+
+#### 4. Install with Custom Image Repository
+
+```bash
+./deploy-scripts/deploy-clm.sh --action install \
+  --namespace dev-test \
+  --api-image-repo myregistry.io/hyperfleet-api \
+  --api-image-tag pr-123
+```
+
+#### 5. Dry-Run Installation (No Changes)
+
+```bash
+./deploy-scripts/deploy-clm.sh --action install \
+  --namespace test \
+  --dry-run \
+  --verbose
+```
+
+This simulates the installation without making any actual changes.
+
+### Uninstallation Examples
+
+#### 1. Uninstall All Components
+
+```bash
+./deploy-scripts/deploy-clm.sh --action uninstall --namespace hyperfleet-e2e
+```
+
+This removes all Helm releases.
+
+#### 2. Dry-Run Uninstallation
+
+```bash
+./deploy-scripts/deploy-clm.sh --action uninstall \
+  --namespace test-env \
+  --dry-run \
+  --verbose
+```
+
+#### 3. Uninstall Specific Components Only
+
+```bash
+./deploy-scripts/deploy-clm.sh --action uninstall \
+  --namespace test-env \
+  --skip-api \
+  --skip-sentinel
+```
+
+This only uninstalls the Adapter component.
+
+## Script Workflow
+
+### Installation Flow
+
+1. **Dependency Checks**: Validates that `kubectl`, `helm`, and `git` are available
+2. **Context Validation**: Verifies kubectl context and cluster connectivity
+3. **Chart Cloning**: Clones Helm charts from Git repositories
+4. **Component Installation**: Installs components in order (API → Sentinel → Adapter) using `helm upgrade --install` with `--create-namespace`
+5. **Pod Health Verification**: Verifies all pods are running and healthy (detects CrashLoopBackOff, ImagePullBackOff, etc.)
+6. **Status Reporting**: Displays deployment status and usage instructions
+
+If any component fails health verification, the script automatically retrieves pod logs for troubleshooting and exits with an error status.
+
+### Uninstallation Flow
+
+1. **Dependency Checks**: Validates required tools
+2. **Context Validation**: Verifies kubectl context
+3. **User Confirmation**: Prompts for confirmation (unless `--dry-run`)
+4. **Component Removal**: Uninstalls Helm releases in reverse order (Adapter → Sentinel → API), which automatically removes all release-managed resources
+5. **Cleanup**: Removes temporary working directories
+
+## Namespace Management
+
+The script leverages Helm's built-in namespace management:
+
+- **Installation**: Namespace is automatically created by Helm using the `--create-namespace` flag
+- **Uninstallation**: Resources are removed by `helm uninstall`, but the namespace is **not deleted**
+
+If you want to completely remove the namespace after uninstallation:
+
+```bash
+# Uninstall components
+./deploy-scripts/deploy-clm.sh --action uninstall --namespace test-env
+
+# Manually delete namespace if desired
+kubectl delete namespace test-env
+```
+
+This design allows you to:
+- Reuse the same namespace for multiple install/uninstall cycles
+- Keep other resources in the namespace that aren't managed by Helm
+- Manually inspect resources after uninstallation for debugging
+
+## Troubleshooting
+
+### Debugging
+
+Use the `--dry-run` and `--verbose` flags to see what the script would do without making changes:
+
+```bash
+./deploy-scripts/deploy-clm.sh --action install \
+  --namespace test \
+  --dry-run \
+  --verbose
+```
+
+Check Helm deployment status:
+
+```bash
+helm list -n <namespace>
+kubectl get pods -n <namespace>
+kubectl logs -n <namespace> <pod-name>
+```
+
+View script execution with bash trace:
+
+```bash
+bash -x deploy-scripts/deploy-clm.sh --action install --namespace test
+```
+
+## Integration with E2E Tests
+
+### Pre-Test Setup
+
+Before running E2E tests, deploy the CLM components:
+
+```bash
+# Deploy test environment
+./deploy-scripts/deploy-clm.sh --action install --namespace e2e-test
+
+# Configure E2E test API URL
+EXTERNAL_IP=$(kubectl get svc hyperfleet-api -n e2e-test -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+export HYPERFLEET_API_URL="http://${EXTERNAL_IP}:8000"
+
+# Run E2E tests
+./bin/hyperfleet-e2e test --label-filter=tier0
+```
+
+### Post-Test Cleanup
+
+After tests complete:
+
+```bash
+./deploy-scripts/deploy-clm.sh --action uninstall --namespace e2e-test
+```
+
+## Script Output
+
+The script provides structured log output with the following levels:
+
+- **[INFO]**: Informational messages
+- **[SUCCESS]**: Successful operations
+- **[WARNING]**: Warnings (non-critical)
+- **[ERROR]**: Errors (critical failures)
+- **[VERBOSE]**: Detailed debug information (when `--verbose` is enabled)
+
+## Best Practices
+
+1. **Use Dry-Run First**: Always test with `--dry-run` before actual deployment
+2. **Namespace Isolation**: Use dedicated namespaces for different test environments
+3. **Tag Specificity**: Use specific image tags instead of `latest` for reproducible deployments
+4. 
**Cleanup**: Always cleanup test environments after use to save resources +5. **Verbose Logging**: Use `--verbose` when troubleshooting issues +6. **Version Alignment**: Deploy matching versions of all components together + diff --git a/deploy-scripts/deploy-clm.sh b/deploy-scripts/deploy-clm.sh new file mode 100755 index 0000000..7df253e --- /dev/null +++ b/deploy-scripts/deploy-clm.sh @@ -0,0 +1,459 @@ +#!/usr/bin/env bash + +# deploy-clm.sh - Automated CLM Components Deployment Script +# +# This script automates the installation and uninstallation of HyperFleet CLM components +# (API, Sentinel, and Adapters) using Helm for E2E testing environments. +# +# Usage: +# ./deploy-clm.sh --action install --namespace hyperfleet-e2e +# ./deploy-clm.sh --action uninstall --namespace hyperfleet-e2e --dry-run + +set -euo pipefail + +# ============================================================================ +# Working Directories (must be set before loading .env) +# ============================================================================ +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" +WORK_DIR="${PROJECT_ROOT}/.deploy-work" +TESTDATA_DIR="${PROJECT_ROOT}/testdata" + +# ============================================================================ +# Load Environment Variables from .env file +# ============================================================================ +ENV_FILE="${SCRIPT_DIR}/.env" + +if [[ -f "${ENV_FILE}" ]]; then + set -a # automatically export all variables + source "${ENV_FILE}" + set +a +else + echo "[WARNING] .env file not found at ${ENV_FILE}" + echo "[WARNING] Using default configuration values" +fi + +# ============================================================================ +# Default Configuration (fallback if .env is not loaded) +# ============================================================================ + +ACTION="${ACTION:-}" +NAMESPACE="${NAMESPACE:-hyperfleet-e2e}" +DRY_RUN="${DRY_RUN:-false}" +VERBOSE="${VERBOSE:-false}" + +# Image Registry +IMAGE_REGISTRY="${IMAGE_REGISTRY:-registry.ci.openshift.org/ci}" + +# Provider Configuration +GCP_PROJECT_ID="${GCP_PROJECT_ID:-hcm-hyperfleet}" + +# API Component +API_IMAGE_REPO="${API_IMAGE_REPO:-hyperfleet-api}" +API_IMAGE_TAG="${API_IMAGE_TAG:-latest}" +API_SERVICE_TYPE="${API_SERVICE_TYPE:-LoadBalancer}" +API_ADAPTERS_CLUSTER="${API_ADAPTERS_CLUSTER:-}" +API_ADAPTERS_NODEPOOL="${API_ADAPTERS_NODEPOOL:-}" + +# Sentinel Component +SENTINEL_IMAGE_REPO="${SENTINEL_IMAGE_REPO:-hyperfleet-sentinel}" +SENTINEL_IMAGE_TAG="${SENTINEL_IMAGE_TAG:-latest}" +SENTINEL_BROKER_TYPE="${SENTINEL_BROKER_TYPE:-googlepubsub}" +SENTINEL_GOOGLEPUBSUB_CREATE_TOPIC_IF_MISSING="${SENTINEL_GOOGLEPUBSUB_CREATE_TOPIC_IF_MISSING:-true}" + +# Adapter Component +ADAPTER_IMAGE_REPO="${ADAPTER_IMAGE_REPO:-hyperfleet-adapter}" +ADAPTER_IMAGE_TAG="${ADAPTER_IMAGE_TAG:-latest}" +ADAPTER_GOOGLEPUBSUB_CREATE_TOPIC_IF_MISSING="${ADAPTER_GOOGLEPUBSUB_CREATE_TOPIC_IF_MISSING:-true}" +ADAPTER_GOOGLEPUBSUB_CREATE_SUBSCRIPTION_IF_MISSING="${ADAPTER_GOOGLEPUBSUB_CREATE_SUBSCRIPTION_IF_MISSING:-true}" + +# HyperFleet API Configuration +API_BASE_URL="${API_BASE_URL:-http://hyperfleet-api:8000}" + +# Release name prefix +RELEASE_PREFIX="${RELEASE_PREFIX:-hyperfleet}" + +# Helm Chart Sources +API_CHART_REPO="${API_CHART_REPO:-https://github.com/openshift-hyperfleet/hyperfleet-api.git}" +API_CHART_REF="${API_CHART_REF:-main}" +API_CHART_PATH="${API_CHART_PATH:-charts}" + 
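+# NOTE: every default in this section uses ${VAR:-fallback} expansion, so values already
+# exported in the environment or loaded from deploy-scripts/.env above take precedence
+# over these fallbacks, and command-line flags parsed later override both.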
+SENTINEL_CHART_REPO="${SENTINEL_CHART_REPO:-https://github.com/openshift-hyperfleet/hyperfleet-sentinel.git}" +SENTINEL_CHART_REF="${SENTINEL_CHART_REF:-main}" +SENTINEL_CHART_PATH="${SENTINEL_CHART_PATH:-deployments/helm/sentinel}" + +ADAPTER_CHART_REPO="${ADAPTER_CHART_REPO:-https://github.com/openshift-hyperfleet/hyperfleet-adapter.git}" +ADAPTER_CHART_REF="${ADAPTER_CHART_REF:-main}" +ADAPTER_CHART_PATH="${ADAPTER_CHART_PATH:-charts}" + +# Component flags +INSTALL_API="${INSTALL_API:-true}" +INSTALL_SENTINEL="${INSTALL_SENTINEL:-true}" +INSTALL_ADAPTER="${INSTALL_ADAPTER:-false}" + +# ============================================================================ +# Load Library Modules +# ============================================================================ + +source "${SCRIPT_DIR}/lib/common.sh" +source "${SCRIPT_DIR}/lib/helm.sh" +source "${SCRIPT_DIR}/lib/api.sh" +source "${SCRIPT_DIR}/lib/sentinel.sh" +source "${SCRIPT_DIR}/lib/adapter.sh" + +# ============================================================================ +# Usage and Argument Parsing +# ============================================================================ + +print_usage() { + cat << EOF +Usage: ${0##*/} --action [OPTIONS] + +Automated deployment script for HyperFleet CLM components (API, Sentinel, Adapter) + +CONFIGURATION: + This script loads configuration from ${SCRIPT_DIR}/.env file. + You can override any .env value using command-line flags. + +REQUIRED FLAGS: + --action Action to perform: install or uninstall + +OPTIONAL FLAGS: + --namespace Kubernetes namespace (default: hyperfleet-e2e) + + # Component Selection + --skip-api Skip API installation + --skip-sentinel Skip Sentinel installation + --skip-adapter Skip Adapter installation + + # Image Configuration + --image-registry Image registry (default: ${IMAGE_REGISTRY}) + --api-image-repo API image repository (default: ${API_IMAGE_REPO}) + --api-image-tag API image tag (default: ${API_IMAGE_TAG}) + --sentinel-image-repo Sentinel image repository (default: ${SENTINEL_IMAGE_REPO}) + --sentinel-image-tag Sentinel image tag (default: ${SENTINEL_IMAGE_TAG}) + --adapter-image-repo Adapter image repository (default: ${ADAPTER_IMAGE_REPO}) + --adapter-image-tag Adapter image tag (default: ${ADAPTER_IMAGE_TAG}) + + # API Configuration + --api-base-url HyperFleet API base URL for Sentinel and Adapter + (default: http://hyperfleet-api..svc.cluster.local:8000) + --api-adapters-cluster Comma-separated list of cluster adapters (e.g., "example1,validation") + --api-adapters-nodepool Comma-separated list of nodepool adapters (e.g., "validation,hypershift") + + # Release Configuration + --release-prefix Release name prefix (default: hyperfleet) + Components will be named: -api, -sentinel, -adapter + + # Execution Options + --dry-run Print commands without executing + --verbose Enable verbose logging + --help Show this help message + +ENVIRONMENT VARIABLES: + All configuration can be set in the .env file located at: ${SCRIPT_DIR}/.env + + Common variables: + - NAMESPACE Kubernetes namespace + - IMAGE_REGISTRY Container image registry + - API_IMAGE_TAG API image tag + - SENTINEL_IMAGE_TAG Sentinel image tag + - ADAPTER_IMAGE_TAG Adapter image tag + - GCP_PROJECT_ID Google Cloud Project ID for Pub/Sub + +EXAMPLES: + # Install all components with default settings + ${0##*/} --action install --namespace hyperfleet-e2e + + # Install with custom image tags + ${0##*/} --action install \\ + --namespace test-env \\ + --api-image-tag v1.0.0 \\ + --sentinel-image-tag v1.0.0 
\\ + --adapter-image-tag v1.0.0 + + # Install only API and Sentinel + ${0##*/} --action install --skip-adapter + + # Dry-run uninstallation + ${0##*/} --action uninstall --namespace hyperfleet-e2e --dry-run --verbose + + # Install with custom image repositories + ${0##*/} --action install \\ + --api-image-repo myregistry.io/hyperfleet-api \\ + --api-image-tag dev-123 + +EOF +} + +parse_arguments() { + if [[ $# -eq 0 ]]; then + print_usage + exit 1 + fi + + while [[ $# -gt 0 ]]; do + case "$1" in + --action) + ACTION="$2" + shift 2 + ;; + --namespace) + NAMESPACE="$2" + shift 2 + ;; + --skip-api) + INSTALL_API=false + shift + ;; + --skip-sentinel) + INSTALL_SENTINEL=false + shift + ;; + --skip-adapter) + INSTALL_ADAPTER=false + shift + ;; + --image-registry) + IMAGE_REGISTRY="$2" + shift 2 + ;; + --api-image-repo) + API_IMAGE_REPO="$2" + shift 2 + ;; + --api-image-tag) + API_IMAGE_TAG="$2" + shift 2 + ;; + --sentinel-image-repo) + SENTINEL_IMAGE_REPO="$2" + shift 2 + ;; + --sentinel-image-tag) + SENTINEL_IMAGE_TAG="$2" + shift 2 + ;; + --adapter-image-repo) + ADAPTER_IMAGE_REPO="$2" + shift 2 + ;; + --adapter-image-tag) + ADAPTER_IMAGE_TAG="$2" + shift 2 + ;; + --api-base-url) + API_BASE_URL="$2" + shift 2 + ;; + --api-adapters-cluster) + API_ADAPTERS_CLUSTER="$2" + shift 2 + ;; + --api-adapters-nodepool) + API_ADAPTERS_NODEPOOL="$2" + shift 2 + ;; + --release-prefix) + RELEASE_PREFIX="$2" + shift 2 + ;; + --dry-run) + DRY_RUN=true + shift + ;; + --verbose) + VERBOSE=true + shift + ;; + --help|-h) + print_usage + exit 0 + ;; + *) + log_error "Unknown option: $1" + echo + print_usage + exit 1 + ;; + esac + done + + # Validate required arguments + if [[ -z "${ACTION}" ]]; then + log_error "Missing required flag: --action" + echo + print_usage + exit 1 + fi + + if [[ "${ACTION}" != "install" && "${ACTION}" != "uninstall" ]]; then + log_error "Invalid action: ${ACTION}. 
Must be 'install' or 'uninstall'" + exit 1 + fi + + # Validate at least one component is selected + if [[ "${INSTALL_API}" == "false" && "${INSTALL_SENTINEL}" == "false" && "${INSTALL_ADAPTER}" == "false" ]]; then + log_error "At least one component must be selected for installation" + exit 1 + fi +} + +# ============================================================================ +# Main Installation Flow +# ============================================================================ + +perform_install() { + log_section "Starting CLM Components Installation" + + # Validate environment + check_dependencies || exit 1 + validate_kubectl_context || exit 1 + + # Prepare working directory + log_section "Preparing Working Directory" + mkdir -p "${WORK_DIR}" + log_verbose "Work directory: ${WORK_DIR}" + + # Clone Helm charts + log_section "Cloning Helm Charts" + + if [[ "${INSTALL_API}" == "true" ]]; then + clone_helm_chart "api" "${API_CHART_REPO}" "${API_CHART_REF}" "${API_CHART_PATH}" || exit 1 + fi + + if [[ "${INSTALL_SENTINEL}" == "true" ]]; then + clone_helm_chart "sentinel" "${SENTINEL_CHART_REPO}" "${SENTINEL_CHART_REF}" "${SENTINEL_CHART_PATH}" || exit 1 + fi + + if [[ "${INSTALL_ADAPTER}" == "true" ]]; then + clone_helm_chart "adapter" "${ADAPTER_CHART_REPO}" "${ADAPTER_CHART_REF}" "${ADAPTER_CHART_PATH}" || exit 1 + fi + + # Install components in order: API -> Sentinel -> Adapter + if [[ "${INSTALL_API}" == "true" ]]; then + install_api || exit 1 + fi + + if [[ "${INSTALL_SENTINEL}" == "true" ]]; then + install_sentinel || exit 1 + fi + + if [[ "${INSTALL_ADAPTER}" == "true" ]]; then + install_adapters || { + log_error "Adapter installation failed" + log_section "Installation Failed" + exit 1 + } + fi + + # Final status + log_section "Installation Complete" + + if [[ "${DRY_RUN}" == "false" ]]; then + log_info "Deployed components:" + helm list -n "${NAMESPACE}" + + echo + log_info "Pod status:" + kubectl get pods -n "${NAMESPACE}" + + echo + log_success "All components installed successfully!" 
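+        # Post-install hints follow. The LoadBalancer ingress IP lookup further below may
+        # return an empty string until the cloud provider finishes provisioning the Service,
+        # in which case the API URL hint is simply skipped.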
+ log_info "Namespace: ${NAMESPACE}" + log_info "To view logs: kubectl logs -n ${NAMESPACE} -l app.kubernetes.io/name=" + + # Display API external IP if available + if [[ "${INSTALL_API}" == "true" ]]; then + local external_ip + external_ip=$(kubectl get svc "${RELEASE_PREFIX}-api" -n "${NAMESPACE}" -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null) + if [[ -n "${external_ip}" ]]; then + echo + log_info "HyperFleet API External IP: ${external_ip}" + log_info "API URL: http://${external_ip}:8000" + fi + fi + else + log_info "[DRY-RUN] Installation simulation complete" + fi + + # Clean up work directory + if [[ "${DRY_RUN}" == "false" && "${VERBOSE}" == "false" ]]; then + log_verbose "Cleaning up work directory" + rm -rf "${WORK_DIR}" + fi +} + +# ============================================================================ +# Main Uninstallation Flow +# ============================================================================ + +perform_uninstall() { + log_section "Starting CLM Components Uninstallation" + + # Validate environment + check_dependencies || exit 1 + validate_kubectl_context || exit 1 + + # Uninstall components in reverse order: Adapter -> Sentinel -> API + if [[ "${INSTALL_ADAPTER}" == "true" ]]; then + uninstall_adapters + fi + + if [[ "${INSTALL_SENTINEL}" == "true" ]]; then + uninstall_sentinel + fi + + if [[ "${INSTALL_API}" == "true" ]]; then + uninstall_api || log_warning "Failed to uninstall API" + fi + + # Final status + log_section "Uninstallation Complete" + + if [[ "${DRY_RUN}" == "false" ]]; then + log_success "All components uninstalled successfully!" + else + log_info "[DRY-RUN] Uninstallation simulation complete" + fi + + # Clean up work directory + if [[ -d "${WORK_DIR}" ]]; then + log_verbose "Cleaning up work directory" + rm -rf "${WORK_DIR}" + fi +} + +# ============================================================================ +# Main Entry Point +# ============================================================================ + +main() { + parse_arguments "$@" + + log_section "CLM Components Deployment Script" + log_info "Action: ${ACTION}" + log_info "Namespace: ${NAMESPACE}" + log_info "Dry-run: ${DRY_RUN}" + log_info "Verbose: ${VERBOSE}" + + if [[ "${VERBOSE}" == "true" ]]; then + echo + log_verbose "Component Configuration:" + log_verbose " API: ${INSTALL_API} (${API_IMAGE_REPO}:${API_IMAGE_TAG})" + log_verbose " Sentinel: ${INSTALL_SENTINEL} (${SENTINEL_IMAGE_REPO}:${SENTINEL_IMAGE_TAG})" + log_verbose " Adapter: ${INSTALL_ADAPTER} (${ADAPTER_IMAGE_REPO}:${ADAPTER_IMAGE_TAG})" + fi + + case "${ACTION}" in + install) + perform_install + ;; + uninstall) + perform_uninstall + ;; + esac +} + +# Run main function +main "$@" diff --git a/deploy-scripts/lib/adapter.sh b/deploy-scripts/lib/adapter.sh new file mode 100755 index 0000000..b715944 --- /dev/null +++ b/deploy-scripts/lib/adapter.sh @@ -0,0 +1,301 @@ +#!/usr/bin/env bash + +# adapter.sh - HyperFleet Adapter component deployment functions +# +# This module handles discovery, installation, and uninstallation of adapters +# from the testdata/adapter-configs directory + +# ============================================================================ +# Adapter Discovery Functions +# ============================================================================ + +discover_adapters() { + local adapter_configs_dir="${TESTDATA_DIR}/adapter-configs" + + if [[ ! 
-d "${adapter_configs_dir}" ]]; then + log_verbose "Adapter configs directory not found: ${adapter_configs_dir}" >&2 + return 1 + fi + + # Find all directories matching clusters-* or nodepools-* pattern + local adapter_dirs=() + while IFS= read -r -d '' dir; do + local basename=$(basename "$dir") + if [[ "$basename" =~ ^(clusters|nodepools)- ]]; then + adapter_dirs+=("$basename") + fi + done < <(find "${adapter_configs_dir}" -mindepth 1 -maxdepth 1 -type d -print0) + + if [[ ${#adapter_dirs[@]} -eq 0 ]]; then + log_verbose "No adapter configurations found (no clusters-* or nodepools-* directories)" >&2 + return 1 + fi + + log_info "Found ${#adapter_dirs[@]} adapter(s) to deploy:" >&2 + for dir in "${adapter_dirs[@]}"; do + log_info " - ${dir}" >&2 + done + + # Export for use in other functions + printf '%s\n' "${adapter_dirs[@]}" +} + +get_adapters_by_type() { + local resource_type="$1" # "clusters" or "nodepools" + local adapter_configs_dir="${TESTDATA_DIR}/adapter-configs" + + if [[ ! -d "${adapter_configs_dir}" ]]; then + return 1 + fi + + # Find all directories matching the resource type pattern + local adapter_names=() + while IFS= read -r -d '' dir; do + local basename=$(basename "$dir") + if [[ "$basename" =~ ^${resource_type}-(.+)$ ]]; then + # Extract just the adapter name (everything after "clusters-" or "nodepools-") + local adapter_name="${BASH_REMATCH[1]}" + adapter_names+=("${adapter_name}") + fi + done < <(find "${adapter_configs_dir}" -mindepth 1 -maxdepth 1 -type d -print0) + + if [[ ${#adapter_names[@]} -eq 0 ]]; then + return 1 + fi + + # Return comma-separated list + local IFS=',' + echo "${adapter_names[*]}" +} + +parse_adapter_name() { + local dir_name="$1" + + # Extract resource_type and adapter_name + # Format: - + # Examples: clusters-example1-namespace, nodepools-namespace + + if [[ "$dir_name" =~ ^(clusters|nodepools)-(.+)$ ]]; then + local resource_type="${BASH_REMATCH[1]}" + local adapter_name="${BASH_REMATCH[2]}" + + echo "${resource_type}|${adapter_name}" + else + log_error "Invalid adapter directory name format: ${dir_name}" + return 1 + fi +} + +# ============================================================================ +# Adapter Installation Functions +# ============================================================================ + +install_adapter_instance() { + local dir_name="$1" + + log_section "Installing Adapter: ${dir_name}" + + # Parse adapter name + local parsed + if ! parsed=$(parse_adapter_name "${dir_name}"); then + log_error "Failed to parse adapter directory name: ${dir_name}" + return 1 + fi + + local resource_type="${parsed%%|*}" + local adapter_name="${parsed##*|}" + + log_info "Resource type: ${resource_type}" + log_info "Adapter name: ${adapter_name}" + + # Construct release name + local release_name="${RELEASE_PREFIX}-adapter-${resource_type}-${adapter_name}" + + # Source adapter config directory + local adapter_configs_dir="${TESTDATA_DIR}/adapter-configs" + local source_adapter_dir="${adapter_configs_dir}/${dir_name}" + + if [[ ! 
-d "${source_adapter_dir}" ]]; then + log_error "Adapter config directory not found: ${source_adapter_dir}" + return 1 + fi + + # Chart path + local full_chart_path="${WORK_DIR}/adapter/${ADAPTER_CHART_PATH}" + + # Copy adapter config folder to chart directory + local dest_adapter_dir="${full_chart_path}/${dir_name}" + log_info "Copying adapter config from ${source_adapter_dir} to ${dest_adapter_dir}" + + if [[ -d "${dest_adapter_dir}" ]]; then + log_verbose "Removing existing adapter config directory: ${dest_adapter_dir}" + rm -rf "${dest_adapter_dir}" + fi + + cp -r "${source_adapter_dir}" "${dest_adapter_dir}" + + # Values file path (now in the chart directory) + local values_file="${dest_adapter_dir}/values.yaml" + if [[ ! -f "${values_file}" ]]; then + log_error "Values file not found: ${values_file}" + return 1 + fi + + # Construct subscription ID and topic names + # Allow override from environment variables, otherwise use auto-generated defaults + local subscription_id="${ADAPTER_SUBSCRIPTION_ID:-${NAMESPACE}-${resource_type}-${adapter_name}}" + local topic="${ADAPTER_TOPIC:-${NAMESPACE}-${resource_type}}" + local dead_letter_topic="${ADAPTER_DEAD_LETTER_TOPIC:-${NAMESPACE}-${resource_type}-dlq}" + + if [[ "${DRY_RUN}" == "true" ]]; then + log_info "[DRY-RUN] Would install adapter with:" + log_info " Release name: ${release_name}" + log_info " Namespace: ${NAMESPACE}" + log_info " Chart path: ${full_chart_path}" + log_info " Values file: ${values_file}" + log_info " Image: ${IMAGE_REGISTRY}/${ADAPTER_IMAGE_REPO}:${ADAPTER_IMAGE_TAG}" + log_info " Subscription ID: ${subscription_id}" + log_info " Topic: ${topic}" + log_info " Dead Letter Topic: ${dead_letter_topic}" + return 0 + fi + + + # Build helm command + local helm_cmd=( + helm upgrade --install + "${release_name}" + "${full_chart_path}" + --namespace "${NAMESPACE}" + --create-namespace + --wait + --timeout 5m + -f "${values_file}" + --set "image.registry=${IMAGE_REGISTRY}" + --set "image.repository=${ADAPTER_IMAGE_REPO}" + --set "image.tag=${ADAPTER_IMAGE_TAG}" + --set "broker.googlepubsub.projectId=${GCP_PROJECT_ID}" + --set "broker.googlepubsub.createTopicIfMissing=${ADAPTER_GOOGLEPUBSUB_CREATE_TOPIC_IF_MISSING}" + --set "broker.googlepubsub.createSubscriptionIfMissing=${ADAPTER_GOOGLEPUBSUB_CREATE_SUBSCRIPTION_IF_MISSING}" + --set "broker.googlepubsub.subscriptionId=${subscription_id}" + --set "broker.googlepubsub.topic=${topic}" + --set "broker.googlepubsub.deadLetterTopic=${dead_letter_topic}" + ) + + log_info "Executing Helm command:" + log_info "${helm_cmd[*]}" + echo + + if "${helm_cmd[@]}"; then + log_success "Adapter ${adapter_name} for ${resource_type} Helm release created successfully" + + # Verify pod health + log_info "Verifying pod health..." + if verify_pod_health "${NAMESPACE}" "app.kubernetes.io/instance=${release_name}" "${adapter_name}" 120 5; then + log_success "Adapter ${adapter_name} for ${resource_type} is running and healthy" + else + log_error "Adapter ${adapter_name} for ${resource_type} deployment failed health check" + log_info "Checking pod logs for troubleshooting:" + kubectl logs -n "${NAMESPACE}" -l "app.kubernetes.io/instance=${release_name}" --tail=50 2>/dev/null || true + return 1 + fi + else + log_error "Failed to install adapter ${adapter_name} for ${resource_type}" + return 1 + fi +} + +install_adapters() { + log_section "Deploying All Adapters" + + # Discover adapters + local adapters + if ! 
adapters=$(discover_adapters); then + log_warning "No adapters found to deploy" + return 0 + fi + + # Install each adapter + local failed=0 + while IFS= read -r adapter_dir; do + if ! install_adapter_instance "${adapter_dir}"; then + log_warning "Failed to install adapter: ${adapter_dir}" + ((failed++)) + fi + done <<< "${adapters}" + + if [[ ${failed} -gt 0 ]]; then + log_error "${failed} adapter(s) failed to install" + return 1 + else + log_success "All adapters deployed successfully" + fi +} + +# ============================================================================ +# Adapter Uninstallation Functions +# ============================================================================ + +uninstall_adapter_instance() { + local dir_name="$1" + + log_section "Uninstalling Adapter: ${dir_name}" + + # Parse adapter name + local parsed + if ! parsed=$(parse_adapter_name "${dir_name}"); then + log_error "Failed to parse adapter directory name: ${dir_name}" + return 1 + fi + + local resource_type="${parsed%%|*}" + local adapter_name="${parsed##*|}" + + # Construct release name + local release_name="${RELEASE_PREFIX}-adapter-${resource_type}-${adapter_name}" + + # Check if release exists + if ! helm list -n "${NAMESPACE}" 2>/dev/null | grep -q "^${release_name}"; then + log_warning "Release '${release_name}' not found in namespace '${NAMESPACE}'" + return 0 + fi + + if [[ "${DRY_RUN}" == "true" ]]; then + log_info "[DRY-RUN] Would uninstall adapter (release: ${release_name})" + return 0 + fi + + log_info "Uninstalling adapter ${adapter_name} for ${resource_type}..." + log_info "Executing: helm uninstall ${release_name} -n ${NAMESPACE} --wait --timeout 5m" + + if helm uninstall "${release_name}" -n "${NAMESPACE}" --wait --timeout 5m; then + log_success "Adapter ${adapter_name} for ${resource_type} uninstalled successfully" + else + log_error "Failed to uninstall adapter ${adapter_name} for ${resource_type}" + return 1 + fi +} + +uninstall_adapters() { + log_section "Uninstalling All Adapters" + + # Discover adapters + local adapters + if ! adapters=$(discover_adapters); then + log_warning "No adapters found to uninstall" + return 0 + fi + + # Uninstall each adapter + local failed=0 + while IFS= read -r adapter_dir; do + if ! 
uninstall_adapter_instance "${adapter_dir}"; then + log_warning "Failed to uninstall adapter: ${adapter_dir}" + ((failed++)) + fi + done <<< "${adapters}" + + if [[ ${failed} -gt 0 ]]; then + log_warning "${failed} adapter(s) failed to uninstall" + fi +} diff --git a/deploy-scripts/lib/api.sh b/deploy-scripts/lib/api.sh new file mode 100755 index 0000000..1c5e87d --- /dev/null +++ b/deploy-scripts/lib/api.sh @@ -0,0 +1,123 @@ +#!/usr/bin/env bash + +# api.sh - HyperFleet API component deployment functions +# +# This module handles installation and uninstallation of the HyperFleet API component + +# ============================================================================ +# API Component Functions +# ============================================================================ + +install_api() { + log_section "Installing API" + + local release_name="${RELEASE_PREFIX}-api" + local full_chart_path="${WORK_DIR}/api/${API_CHART_PATH}" + + # Auto-discover adapters from testdata/adapter-configs + local discovered_cluster_adapters + local discovered_nodepool_adapters + + discovered_cluster_adapters=$(get_adapters_by_type "clusters") + discovered_nodepool_adapters=$(get_adapters_by_type "nodepools") + + # Use discovered adapters if available, otherwise fall back to env vars + local cluster_adapters="${discovered_cluster_adapters:-${API_ADAPTERS_CLUSTER}}" + local nodepool_adapters="${discovered_nodepool_adapters:-${API_ADAPTERS_NODEPOOL}}" + + if [[ "${DRY_RUN}" == "true" ]]; then + log_info "[DRY-RUN] Would install API with:" + log_info " Release name: ${release_name}" + log_info " Namespace: ${NAMESPACE}" + log_info " Chart path: ${full_chart_path}" + log_info " Image: ${IMAGE_REGISTRY}/${API_IMAGE_REPO}:${API_IMAGE_TAG}" + log_info " Service type: ${API_SERVICE_TYPE}" + [[ -n "${cluster_adapters}" ]] && log_info " Cluster adapters: ${cluster_adapters}" + [[ -n "${nodepool_adapters}" ]] && log_info " Nodepool adapters: ${nodepool_adapters}" + return 0 + fi + + log_info "Installing API..." + log_verbose "Release name: ${release_name}" + log_verbose "Image: ${IMAGE_REGISTRY}/${API_IMAGE_REPO}:${API_IMAGE_TAG}" + + # Build helm command with image overrides + local helm_cmd=( + helm upgrade --install + "${release_name}" + "${full_chart_path}" + --namespace "${NAMESPACE}" + --create-namespace + --wait + --timeout 3m + --set "image.registry=${IMAGE_REGISTRY}" + --set "image.repository=${API_IMAGE_REPO}" + --set "image.tag=${API_IMAGE_TAG}" + --set "service.type=${API_SERVICE_TYPE}" + ) + + # Add adapter configurations (always set both, use empty if not discovered) + # The API chart requires both adapters.cluster and adapters.nodepool to be set + if [[ -n "${cluster_adapters}" ]]; then + helm_cmd+=(--set "adapters.cluster={${cluster_adapters}}") + log_verbose "Cluster adapters (API): ${cluster_adapters}" + else + helm_cmd+=(--set "adapters.cluster={}") + log_verbose "Cluster adapters (API): none" + fi + + if [[ -n "${nodepool_adapters}" ]]; then + helm_cmd+=(--set "adapters.nodepool={${nodepool_adapters}}") + log_verbose "Nodepool adapters (API): ${nodepool_adapters}" + else + helm_cmd+=(--set "adapters.nodepool={}") + log_verbose "Nodepool adapters (API): none" + fi + + log_info "Executing: ${helm_cmd[*]}" + + if "${helm_cmd[@]}"; then + log_success "API Helm release created successfully" + + # Verify pod health + log_info "Verifying pod health..." 
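+        # verify_pod_health <namespace> <label-selector> <display-name> <timeout-seconds> <poll-interval-seconds>
+        # (defined in lib/common.sh)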
+ if verify_pod_health "${NAMESPACE}" "app.kubernetes.io/instance=${release_name}" "API" 120 5; then + log_success "API is running and healthy" + else + log_error "API deployment failed health check" + log_info "Checking pod logs for troubleshooting:" + kubectl logs -n "${NAMESPACE}" -l "app.kubernetes.io/instance=${release_name}" --tail=50 2>/dev/null || true + return 1 + fi + else + log_error "Failed to install API" + return 1 + fi +} + +uninstall_api() { + log_section "Uninstalling API" + + local release_name="${RELEASE_PREFIX}-api" + + # Check if release exists + if [[ -z "$(helm list -n "${NAMESPACE}" -q -f "^${release_name}$")" ]]; then + log_warning "Release '${release_name}' not found in namespace '${NAMESPACE}'" + return 0 + fi + + if [[ "${DRY_RUN}" == "true" ]]; then + log_info "[DRY-RUN] Would uninstall API (release: ${release_name})" + return 0 + fi + + log_info "Uninstalling API..." + log_info "Executing: helm uninstall ${release_name} -n ${NAMESPACE} --wait --timeout 5m" + + if helm uninstall "${release_name}" -n "${NAMESPACE}" --wait --timeout 5m; then + log_success "API uninstalled successfully" + else + log_error "Failed to uninstall API" + return 1 + fi +} diff --git a/deploy-scripts/lib/common.sh b/deploy-scripts/lib/common.sh new file mode 100755 index 0000000..d9f7222 --- /dev/null +++ b/deploy-scripts/lib/common.sh @@ -0,0 +1,185 @@ +#!/usr/bin/env bash + +# common.sh - Common utilities for CLM deployment scripts +# +# This module provides shared functionality used across all deployment scripts: +# - Logging functions +# - Dependency checking +# - Kubernetes context validation + +# ============================================================================ +# Logging Functions +# ============================================================================ + +log_info() { + echo "[INFO] $*" +} + +log_success() { + echo "[SUCCESS] $*" +} + +log_warning() { + echo "[WARNING] $*" +} + +log_error() { + echo "[ERROR] $*" >&2 +} + +log_verbose() { + if [[ "${VERBOSE}" == "true" ]]; then + echo "[VERBOSE] $*" + fi +} + +log_section() { + echo + echo "===================================================================" + echo "$*" + echo "===================================================================" +} + +# ============================================================================ +# Dependency Checking +# ============================================================================ + +check_dependencies() { + log_section "Checking Dependencies" + + local missing_deps=() + + local deps=("kubectl" "helm" "git") + for dep in "${deps[@]}"; do + if ! 
command -v "${dep}" &> /dev/null; then + missing_deps+=("${dep}") + log_error "Required dependency '${dep}' not found" + else + local version + case "${dep}" in + kubectl) + version=$(kubectl version --client --short 2>/dev/null | head -n1 || echo "unknown") + ;; + helm) + version=$(helm version --short 2>/dev/null || echo "unknown") + ;; + git) + version=$(git --version || echo "unknown") + ;; + esac + log_verbose "Found ${dep}: ${version}" + fi + done + + if [[ ${#missing_deps[@]} -gt 0 ]]; then + log_error "Missing required dependencies: ${missing_deps[*]}" + log_error "Please install the missing dependencies and try again" + return 1 + fi + + log_success "All dependencies are available" + return 0 +} + +# ============================================================================ +# Kubernetes Context Validation +# ============================================================================ + +validate_kubectl_context() { + log_section "Validating Kubernetes Context" + + if ! kubectl cluster-info &> /dev/null; then + log_error "Unable to connect to Kubernetes cluster" + log_error "Please ensure your kubeconfig is properly configured" + return 1 + fi + + local context + context=$(kubectl config current-context) + log_info "Current kubectl context: ${context}" + + local cluster_info + cluster_info=$(kubectl cluster-info 2>&1 | head -n1 || echo "unknown") + log_verbose "Cluster info: ${cluster_info}" + + log_success "Kubectl context validated" + return 0 +} + +# ============================================================================ +# Pod Health Verification +# ============================================================================ + +verify_pod_health() { + local namespace="$1" + local selector="$2" + local component_name="${3:-component}" + local timeout="${4:-60}" + local interval="${5:-5}" + + log_info "Verifying pod health for ${component_name}..." 
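+    # Poll pod status every ${interval}s for up to ${timeout}s; bail out early if any pod
+    # reports CrashLoopBackOff, ImagePullBackOff, ErrImagePull, Error, or a Failed phase.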
+ log_verbose "Namespace: ${namespace}, Selector: ${selector}" + + local elapsed=0 + while [[ ${elapsed} -lt ${timeout} ]]; do + # Get pod status + local pod_status + pod_status=$(kubectl get pods -n "${namespace}" -l "${selector}" \ + -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.phase}{"\t"}{range .status.containerStatuses[*]}{.state.waiting.reason}{" "}{.state.terminated.reason}{end}{"\n"}{end}' 2>/dev/null) + + if [[ -z "${pod_status}" ]]; then + log_warning "No pods found with selector ${selector} in namespace ${namespace}" + sleep ${interval} + ((elapsed += interval)) + continue + fi + + # Check for failure states + local has_failures=false + local failure_details="" + + while IFS=$'\t' read -r pod_name phase reasons; do + log_verbose "Pod ${pod_name}: phase=${phase}, reasons=${reasons}" + + # Check for problematic states + if [[ "${phase}" == "Failed" ]] || \ + [[ "${reasons}" == *"CrashLoopBackOff"* ]] || \ + [[ "${reasons}" == *"Error"* ]] || \ + [[ "${reasons}" == *"ImagePullBackOff"* ]] || \ + [[ "${reasons}" == *"ErrImagePull"* ]]; then + has_failures=true + failure_details="${failure_details}\n - ${pod_name}: ${phase} (${reasons})" + fi + done <<< "${pod_status}" + + if [[ "${has_failures}" == "true" ]]; then + log_error "Pod health check failed for ${component_name}:" + echo -e "${failure_details}" + log_info "Pod details:" + kubectl get pods -n "${namespace}" -l "${selector}" + return 1 + fi + + # Check if all pods are running + local running_count + running_count=$(kubectl get pods -n "${namespace}" -l "${selector}" \ + -o jsonpath='{range .items[*]}{.status.phase}{"\n"}{end}' 2>/dev/null | grep -c "^Running$" || echo "0") + + local total_count + total_count=$(kubectl get pods -n "${namespace}" -l "${selector}" --no-headers 2>/dev/null | wc -l | tr -d ' ') + + if [[ ${running_count} -gt 0 ]] && [[ ${running_count} -eq ${total_count} ]]; then + log_success "All pods for ${component_name} are running (${running_count}/${total_count})" + return 0 + fi + + log_verbose "Waiting for pods to be ready: ${running_count}/${total_count} running (${elapsed}s/${timeout}s)" + sleep ${interval} + ((elapsed += interval)) + done + + log_error "Timeout waiting for ${component_name} pods to become healthy" + log_info "Current pod status:" + kubectl get pods -n "${namespace}" -l "${selector}" + return 1 +} diff --git a/deploy-scripts/lib/helm.sh b/deploy-scripts/lib/helm.sh new file mode 100755 index 0000000..a835f49 --- /dev/null +++ b/deploy-scripts/lib/helm.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +# helm.sh - Helm chart management functions +# +# This module provides functions for cloning and managing Helm charts + +# ============================================================================ +# Helm Chart Management +# ============================================================================ + +clone_helm_chart() { + local component="$1" + local repo_url="$2" + local ref="$3" + local chart_path="$4" + + log_info "Cloning ${component} Helm chart from ${repo_url}@${ref} (sparse: ${chart_path})" + + local component_dir="${WORK_DIR}/${component}" + + if [[ -z "${WORK_DIR}" || "${WORK_DIR}" == "/" ]]; then + log_error "WORK_DIR must be set to a non-root directory" + return 1 + fi + if [[ -z "${component}" ]]; then + log_error "Component name is required" + return 1 + fi + + # Clean up any existing directory to ensure fresh clone + if [[ -d "${component_dir}" ]]; then + log_verbose "Removing existing directory: ${component_dir}" + rm -rf "${component_dir}" + fi + + if [[ 
"${DRY_RUN}" == "true" ]]; then + log_info "[DRY-RUN] Would clone (sparse): git clone --depth 1 --filter=blob:none --sparse --branch ${ref} ${repo_url}" + log_info "[DRY-RUN] Would checkout: ${chart_path}" + return 0 + fi + + # Clone with sparse checkout - only download the chart directory + log_verbose "Executing sparse checkout: git clone --depth 1 --filter=blob:none --sparse --no-checkout --branch ${ref} ${repo_url} ${component_dir}" + if ! git clone --depth 1 --filter=blob:none --sparse --no-checkout --branch "${ref}" "${repo_url}" "${component_dir}" >/dev/null 2>&1; then + log_error "Failed to clone ${component} Helm chart" + return 1 + fi + + # Configure sparse checkout to only include the chart path (no cone mode to avoid root files) + log_verbose "Configuring sparse checkout for: ${chart_path}" + if ! (cd "${component_dir}" && \ + git sparse-checkout init --no-cone >/dev/null 2>&1 && \ + git sparse-checkout set "${chart_path}" >/dev/null 2>&1 && \ + git checkout "${ref}" >/dev/null 2>&1); then + log_error "Failed to checkout chart path: ${chart_path}" + return 1 + fi + + # Verify chart path exists + local full_chart_path="${component_dir}/${chart_path}" + if [[ ! -f "${full_chart_path}/Chart.yaml" ]]; then + log_error "Chart.yaml not found at ${full_chart_path}" + log_error "Please verify the chart path is correct" + return 1 + fi + + log_success "Cloned ${component} Helm chart" + log_verbose "Chart location: ${full_chart_path}" +} diff --git a/deploy-scripts/lib/sentinel.sh b/deploy-scripts/lib/sentinel.sh new file mode 100755 index 0000000..c78b1e9 --- /dev/null +++ b/deploy-scripts/lib/sentinel.sh @@ -0,0 +1,132 @@ +#!/usr/bin/env bash + +# sentinel.sh - HyperFleet Sentinel component deployment functions +# +# This module handles installation and uninstallation of HyperFleet Sentinel instances +# for both clusters and nodepools resource types + +# ============================================================================ +# Sentinel Component Functions +# ============================================================================ + +install_sentinel_instance() { + local resource_type="$1" # "clusters" or "nodepools" + + local component_name="Sentinel (${resource_type})" + local release_name="${RELEASE_PREFIX}-sentinel-${resource_type}" + local full_chart_path="${WORK_DIR}/sentinel/${SENTINEL_CHART_PATH}" + + log_section "Installing ${component_name}" + + # Determine API base URL + local api_url="${API_BASE_URL}" + + if [[ "${DRY_RUN}" == "true" ]]; then + log_info "[DRY-RUN] Would install ${component_name} with:" + log_info " Release name: ${release_name}" + log_info " Namespace: ${NAMESPACE}" + log_info " Chart path: ${full_chart_path}" + log_info " Image: ${IMAGE_REGISTRY}/${SENTINEL_IMAGE_REPO}:${SENTINEL_IMAGE_TAG}" + log_info " API base URL: ${api_url} (config.hyperfleetApi.baseUrl)" + log_info " Broker type: ${SENTINEL_BROKER_TYPE}" + log_info " Resource type: ${resource_type}" + log_info " Google Pub/Sub Project ID: ${GCP_PROJECT_ID}" + log_info " Google Pub/Sub Create Topic If Missing: ${SENTINEL_GOOGLEPUBSUB_CREATE_TOPIC_IF_MISSING}" + return 0 + fi + + log_info "Installing ${component_name}..." 
+ log_verbose "Release name: ${release_name}" + log_verbose "Image: ${IMAGE_REGISTRY}/${SENTINEL_IMAGE_REPO}:${SENTINEL_IMAGE_TAG}" + log_verbose "API base URL: ${api_url}" + log_verbose "Resource type: ${resource_type}" + + # Build helm command + local helm_cmd=( + helm upgrade --install + "${release_name}" + "${full_chart_path}" + --namespace "${NAMESPACE}" + --create-namespace + --wait + --timeout 3m + --set "image.registry=${IMAGE_REGISTRY}" + --set "image.repository=${SENTINEL_IMAGE_REPO}" + --set "image.tag=${SENTINEL_IMAGE_TAG}" + --set "config.hyperfleetApi.baseUrl=${api_url}" + --set "config.resourceType=${resource_type}" + --set "broker.type=${SENTINEL_BROKER_TYPE}" + --set "broker.googlepubsub.projectId=${GCP_PROJECT_ID}" + --set "broker.googlepubsub.createTopicIfMissing=${SENTINEL_GOOGLEPUBSUB_CREATE_TOPIC_IF_MISSING}" + ) + + log_info "Executing: ${helm_cmd[*]}" + + if "${helm_cmd[@]}"; then + log_success "${component_name} Helm release created successfully" + + # Verify pod health + log_info "Verifying pod health..." + if verify_pod_health "${NAMESPACE}" "app.kubernetes.io/instance=${release_name}" "${component_name}" 120 5; then + log_success "${component_name} is running and healthy" + else + log_error "${component_name} deployment failed health check" + log_info "Checking pod logs for troubleshooting:" + kubectl logs -n "${NAMESPACE}" -l "app.kubernetes.io/instance=${release_name}" --tail=50 2>/dev/null || true + return 1 + fi + else + log_error "Failed to install ${component_name}" + return 1 + fi +} + +install_sentinel() { + + install_sentinel_instance "clusters" || return 1 + install_sentinel_instance "nodepools" || return 1 +} + +uninstall_sentinel_instance() { + local resource_type="$1" # "clusters" or "nodepools" + + # Capitalize first letter for display + local resource_type_display + if [[ "${resource_type}" == "clusters" ]]; then + resource_type_display="Clusters" + else + resource_type_display="Nodepools" + fi + + local component_name="Sentinel (${resource_type_display})" + local release_name="${RELEASE_PREFIX}-sentinel-${resource_type}" + + log_section "Uninstalling ${component_name}" + + # Check if release exists + if ! helm list -n "${NAMESPACE}" | grep -q "^${release_name}"; then + log_warning "Release '${release_name}' not found in namespace '${NAMESPACE}'" + return 0 + fi + + if [[ "${DRY_RUN}" == "true" ]]; then + log_info "[DRY-RUN] Would uninstall ${component_name} (release: ${release_name})" + return 0 + fi + + log_info "Uninstalling ${component_name}..." 
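+    # One Sentinel release exists per resource type; uninstall_sentinel below removes the
+    # nodepools release first, then clusters (the reverse of the installation order).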
+ log_info "Executing: helm uninstall ${release_name} -n ${NAMESPACE} --wait --timeout 5m" + + if helm uninstall "${release_name}" -n "${NAMESPACE}" --wait --timeout 5m; then + log_success "${component_name} uninstalled successfully" + else + log_error "Failed to uninstall ${component_name}" + return 1 + fi +} + +uninstall_sentinel() { + # Uninstall in reverse order + uninstall_sentinel_instance "nodepools" || log_warning "Failed to uninstall Sentinel (Nodepools)" + uninstall_sentinel_instance "clusters" || log_warning "Failed to uninstall Sentinel (Clusters)" +} diff --git a/testdata/adapter-configs/clusters-example1-namespace/adapter-config.yaml b/testdata/adapter-configs/clusters-example1-namespace/adapter-config.yaml new file mode 100644 index 0000000..52d5442 --- /dev/null +++ b/testdata/adapter-configs/clusters-example1-namespace/adapter-config.yaml @@ -0,0 +1,25 @@ +# Example HyperFleet Adapter deployment configuration +apiVersion: hyperfleet.redhat.com/v1alpha1 +kind: AdapterConfig +metadata: + name: example1-namespace + labels: + hyperfleet.io/adapter-type: example1-namespace + hyperfleet.io/component: adapter +spec: + adapter: + version: "0.1.0" + + # Log the full merged configuration after load (default: false) + debugConfig: false + + clients: + hyperfleetApi: + baseUrl: http://hyperfleet-api:8000 + version: v1 + timeout: 2s + retryAttempts: 3 + retryBackoff: exponential + + kubernetes: + apiVersion: "v1" diff --git a/testdata/adapter-configs/clusters-example1-namespace/adapter-task-config.yaml b/testdata/adapter-configs/clusters-example1-namespace/adapter-task-config.yaml new file mode 100644 index 0000000..109e6b3 --- /dev/null +++ b/testdata/adapter-configs/clusters-example1-namespace/adapter-task-config.yaml @@ -0,0 +1,216 @@ +# Example HyperFleet Adapter task configuration +apiVersion: hyperfleet.redhat.com/v1alpha1 +kind: AdapterTaskConfig +metadata: + name: example1-namespace + labels: + hyperfleet.io/adapter-type: example1-namespace + hyperfleet.io/component: adapter +spec: + # Parameters with all required variables + params: + - name: "hyperfleetApiBaseUrl" + source: "env.HYPERFLEET_API_BASE_URL" + type: "string" + required: true + + - name: "hyperfleetApiVersion" + source: "env.HYPERFLEET_API_VERSION" + type: "string" + default: "v1" + + - name: "clusterId" + source: "event.id" + type: "string" + required: true + + - name: "generation" + source: "event.generation" + type: "int" + required: true + + - name: "namespace" + source: "env.NAMESPACE" + type: "string" + required: true + + - name: "serviceAccountName" + source: "env.SERVICE_ACCOUNT" + type: "string" + required: true + + - name: "simulateResult" + source: "env.SIMULATE_RESULT" + type: "string" + required: true + + # Preconditions with valid operators and CEL expressions + preconditions: + - name: "clusterStatus" + apiCall: + method: "GET" + url: "/clusters/{{ .clusterId }}" + timeout: 10s + retryAttempts: 3 + retryBackoff: "exponential" + capture: + - name: "clusterName" + field: "name" + - name: "generationSpec" + field: "generation" + - name: "readyConditionStatus" + expression: | + status.conditions.filter(c, c.type == "Ready").size() > 0 + ? 
status.conditions.filter(c, c.type == "Ready")[0].status + : "False" + # Structured conditions with valid operators + conditions: + - field: "readyConditionStatus" + operator: "equals" + value: "False" + + - name: "validationCheck" + # Valid CEL expression + expression: | + readyConditionStatus == "False" + + # Resources with valid K8s manifests + resources: + - name: "clusterNamespace" + manifest: + apiVersion: v1 + kind: Namespace + metadata: + name: "{{ .clusterId }}" + labels: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/cluster-name: "{{ .clusterName }}" + annotations: + hyperfleet.io/generation: "{{ .generationSpec }}" + discovery: + namespace: "*" # Cluster-scoped resource (Namespace) + bySelectors: + labelSelector: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/cluster-name: "{{ .clusterName }}" + + # the following configuration is for a job that will be created in the cluster + # in the namespace created above + # it will require a service account to be created in that namespace as well as a role and rolebinding + - name: "jobServiceAccount" + manifest: + ref: "/etc/adapter/job-serviceaccount.yaml" + discovery: + bySelectors: + labelSelector: + hyperfleet.io/resource-type: "service-account" + hyperfleet.io/cluster-id: "{{ .clusterId }}" + + - name: "job_role" + manifest: + ref: "/etc/adapter/job-role.yaml" + discovery: + bySelectors: + labelSelector: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/resource-type: "role" + + - name: "job_rolebinding" + manifest: + ref: "/etc/adapter/job-rolebinding.yaml" + discovery: + bySelectors: + labelSelector: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/resource-type: "role-binding" + + - name: "jobNamespace" + manifest: + ref: "/etc/adapter/job.yaml" + discovery: + bySelectors: + labelSelector: + hyperfleet.io/generation: "{{ .generationSpec }}" + hyperfleet.io/cluster-id: "{{ .clusterId }}" + + # the following configuration is for a deployment that will be created in the cluster + # in the same namespace as the adapter + # and using the same service account as the adapter + + - name: "deploymentNamespace" + manifest: + ref: "/etc/adapter/deployment.yaml" + discovery: + bySelectors: + labelSelector: + hyperfleet.io/generation: "{{ .generationSpec }}" + hyperfleet.io/cluster-id: "{{ .clusterId }}" + + # Post-processing with valid CEL expressions + # This example contains multiple resources, we will only report on the conditions of the jobNamespace not to overcomplicate the example + post: + payloads: + - name: "clusterStatusPayload" + build: + adapter: "{{ .metadata.name }}" + conditions: + # Applied: Job successfully created + - type: "Applied" + status: + expression: | + resources.?jobNamespace.?spec.hasValue() ? "True" : "False" + reason: + expression: | + resources.?jobNamespace.?spec.hasValue() + ? "JobApplied" + : "JobPending" + message: + expression: | + resources.?jobNamespace.hasValue() + ? "jobNamespace manifest applied successfully" + : "jobNamespace is pending to be applied" + # Available: Check job status conditions + - type: "Available" + status: + expression: | + resources.?jobNamespace.?status.?conditions.hasValue() ? + ( resources.?jobNamespace.?status.?conditions.orValue([]).exists(c, c.type == "Available") + ? resources.jobNamespace.status.conditions.filter(c, c.type == "Available")[0].status : "False") + : "Unknown" + reason: + expression: | + resources.?jobNamespace.?status.?conditions.orValue([]).exists(c, c.type == "Available") + ? 
resources.jobNamespace.status.conditions.filter(c, c.type == "Available")[0].reason + : resources.?jobNamespace.?status.?conditions.orValue([]).exists(c, c.type == "Failed") ? "ValidationFailed" + : resources.?jobNamespace.?status.hasValue() ? "ValidationInProgress" : "ValidationPending" + message: + expression: | + resources.?jobNamespace.?status.?conditions.orValue([]).exists(c, c.type == "Available") + ? resources.jobNamespace.status.conditions.filter(c, c.type == "Available")[0].message + : resources.?jobNamespace.?status.?conditions.orValue([]).exists(c, c.type == "Failed") ? "Validation failed" + : resources.?jobNamespace.?status.hasValue() ? "Validation in progress" : "Validation is pending" + # Health: Adapter execution status (runtime) + - type: "Health" + status: + expression: | + adapter.?executionStatus.orValue("") == "success" ? "True" : "False" + reason: + expression: | + adapter.?errorReason.orValue("") != "" ? adapter.?errorReason.orValue("") : "Healthy" + message: + expression: | + adapter.?errorMessage.orValue("") != "" ? adapter.?errorMessage.orValue("") : "All adapter operations in progress or completed successfully" + # Event generation ID metadata field needs to use expression to avoid interpolation issues + observed_generation: + expression: "generationSpec" + observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" + + postActions: + - name: "reportClusterStatus" + apiCall: + method: "POST" + url: "/clusters/{{ .clusterId }}/statuses" + headers: + - name: "Content-Type" + value: "application/json" + body: "{{ .clusterStatusPayload }}" diff --git a/testdata/adapter-configs/clusters-example1-namespace/adapter-task-resource-deployment.yaml b/testdata/adapter-configs/clusters-example1-namespace/adapter-task-resource-deployment.yaml new file mode 100644 index 0000000..42dfd18 --- /dev/null +++ b/testdata/adapter-configs/clusters-example1-namespace/adapter-task-resource-deployment.yaml @@ -0,0 +1,30 @@ +# Test deployment template +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "test-nginx-{{ .clusterId }}" + namespace: "{{ .namespace }}" + labels: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/managed-by: "{{ .metadata.name }}" + hyperfleet.io/generation: "{{ .generationSpec }}" + annotations: + hyperfleet.io/generation: "{{ .generationSpec }}" +spec: + replicas: 1 + selector: + matchLabels: + app: test + hyperfleet.io/cluster-id: "{{ .clusterId }}" + template: + metadata: + labels: + app: test + hyperfleet.io/cluster-id: "{{ .clusterId }}" + spec: + containers: + - name: test + image: nginx:latest + ports: + - containerPort: 80 + diff --git a/testdata/adapter-configs/clusters-example1-namespace/adapter-task-resource-job-role.yaml b/testdata/adapter-configs/clusters-example1-namespace/adapter-task-resource-job-role.yaml new file mode 100644 index 0000000..051434e --- /dev/null +++ b/testdata/adapter-configs/clusters-example1-namespace/adapter-task-resource-job-role.yaml @@ -0,0 +1,23 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: status-reporter + namespace: "{{ .clusterId }}" + labels: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/resource-type: "role" + hyperfleet.io/generation: "{{ .generationSpec }}" + annotations: + hyperfleet.io/generation: "{{ .generationSpec }}" +rules: + # Permission to get and update job status + - apiGroups: [ "batch" ] + resources: [ "jobs" ] + verbs: [ "get" ] + - apiGroups: [ "batch" ] + resources: [ "jobs/status" ] + verbs: [ "get", "update", "patch" ] + # 
Permission to get pod status + - apiGroups: [ "" ] + resources: [ "pods" ] + verbs: [ "get", "list" ] diff --git a/testdata/adapter-configs/clusters-example1-namespace/adapter-task-resource-job-rolebinding.yaml b/testdata/adapter-configs/clusters-example1-namespace/adapter-task-resource-job-rolebinding.yaml new file mode 100644 index 0000000..db80304 --- /dev/null +++ b/testdata/adapter-configs/clusters-example1-namespace/adapter-task-resource-job-rolebinding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: status-reporter + namespace: "{{ .clusterId }}" + labels: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/resource-type: "role-binding" + hyperfleet.io/generation: "{{ .generationSpec }}" + annotations: + hyperfleet.io/generation: "{{ .generationSpec }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: status-reporter +subjects: + - kind: ServiceAccount + name: "{{ .clusterId }}" + namespace: "{{ .clusterId }}" diff --git a/testdata/adapter-configs/clusters-example1-namespace/adapter-task-resource-job-serviceaccount.yaml b/testdata/adapter-configs/clusters-example1-namespace/adapter-task-resource-job-serviceaccount.yaml new file mode 100644 index 0000000..bc991da --- /dev/null +++ b/testdata/adapter-configs/clusters-example1-namespace/adapter-task-resource-job-serviceaccount.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "{{ .clusterId }}" + namespace: "{{ .clusterId }}" + labels: + hyperfleet.io/cluster-id: "{{ .clusterId }}" + hyperfleet.io/resource-type: "service-account" + hyperfleet.io/generation: "{{ .generationSpec }}" + annotations: + hyperfleet.io/generation: "{{ .generationSpec }}" diff --git a/testdata/adapter-configs/clusters-example1-namespace/adapter-task-resource-job.yaml b/testdata/adapter-configs/clusters-example1-namespace/adapter-task-resource-job.yaml new file mode 100644 index 0000000..812974d --- /dev/null +++ b/testdata/adapter-configs/clusters-example1-namespace/adapter-task-resource-job.yaml @@ -0,0 +1,205 @@ +# Kubernetes Job template for GCP validator testing +# +# This template embeds the validate.sh script directly in the job spec, avoiding the need +# to build and push a container image for development. Uses a standard Alpine image. 
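+#
+# Behavior: the embedded script writes a JSON result file to RESULTS_PATH
+# (default: /results/adapter-result.json). The SIMULATE_RESULT environment
+# variable (default: success) selects which kind of result is written; see
+# the `case "${SIMULATE_RESULT}"` statement below (values include "success"
+# and "missing-status").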
+# +# Prerequisites: +# Apply RBAC resources: kubectl apply -f rbac.yaml +# +# +# +apiVersion: batch/v1 +kind: Job +metadata: + name: example-job + namespace: "{{ .clusterId }}" # <- this gets resolved from adapterconfig params + labels: + hyperfleet.io/cluster-id: "{{ .clusterId }}" # <- this gets resolved from adapterconfig params + hyperfleet.io/generation: "{{ .generationSpec }}" # <- this gets resolved from adapterconfig params + annotations: + hyperfleet.io/cluster-id: "{{ .clusterId }}" # <- this gets resolved from adapterconfig params + hyperfleet.io/generation: "{{ .generationSpec }}" # <- this gets resolved from adapterconfig params +spec: + backoffLimit: 0 + activeDeadlineSeconds: 310 # 300 + 10 second buffer + template: + metadata: + labels: + app: example-job + job-name: example-job + spec: + serviceAccountName: "{{ .clusterId }}" + restartPolicy: Never + + # Volumes + volumes: + - name: results + emptyDir: {} + + containers: + - name: example-job + image: alpine:3.19 + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "-c"] + args: + - | + set -e + + # Default configuration + RESULTS_PATH="${RESULTS_PATH:-/results/adapter-result.json}" + SIMULATE_RESULT="${SIMULATE_RESULT:-success}" + + echo "Dummy GCP Validator starting..." + echo "Simulating result: ${SIMULATE_RESULT}" + echo "Results path: ${RESULTS_PATH}" + + # Ensure results directory exists + RESULTS_DIR=$(dirname "${RESULTS_PATH}") + mkdir -p "${RESULTS_DIR}" + + case "${SIMULATE_RESULT}" in + success) + echo "Writing success result..." + cat > "${RESULTS_PATH}" < "${RESULTS_PATH}" < "${RESULTS_PATH}" + exit 0 + ;; + + missing-status) + echo "Writing result with missing status field..." + cat > "${RESULTS_PATH}" < "${RESULTS_PATH}" <